Merge branch 'linus' into perf/urgent, to synchronize with upstream

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2020-02-05 08:44:22 +01:00
3382 changed files with 217322 additions and 59133 deletions


@@ -53,6 +53,7 @@
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
* bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
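The one-line entries for bitmap_cut() and bitmap_replace() are terse, so here is a small worked sketch of both (values chosen for illustration, not taken from this commit):

    /* Sketch: semantics of the two new entries on a single-word bitmap. */
    #include <linux/bitmap.h>

    static void bitmap_cut_replace_demo(void)
    {
            DECLARE_BITMAP(dst, 8);
            unsigned long src[]  = { 0xD6 };        /* 1101 0110 */
            unsigned long old[]  = { 0xF0 };
            unsigned long new[]  = { 0x0F };
            unsigned long mask[] = { 0x3C };

            /* Remove 2 bits starting at bit 2; the higher bits shift down:
             * bits 0-1 stay in place, bits 4-7 move to positions 2-5.
             */
            bitmap_cut(dst, src, 2, 2, 8);          /* dst == 0x36 (0011 0110) */

            /* *dst = (*old & ~(*mask)) | (*new & *mask) */
            bitmap_replace(dst, old, new, mask, 8); /* dst == 0xC0 | 0x0C == 0xCC */
    }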
@@ -133,6 +134,9 @@ extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut,
unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
@@ -456,6 +460,41 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
static inline void bitmap_next_clear_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
{
*rs = find_next_zero_bit(bitmap, end, *rs);
*re = find_next_bit(bitmap, end, *rs + 1);
}
static inline void bitmap_next_set_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
/*
* Bitmap region iterators. Iterates over the bitmap between [@start, @end).
* @rs and @re should be integer variables and will be set to start and end
* index of the current clear or set region.
*/
#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
for ((rs) = (start), \
bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
(rs) < (re); \
(rs) = (re) + 1, \
bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
for ((rs) = (start), \
bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
(rs) < (re); \
(rs) = (re) + 1, \
bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
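For context, a minimal usage sketch of the new iterators (illustrative, not part of the diff):

    static void region_iter_demo(void)
    {
            DECLARE_BITMAP(map, 128);
            unsigned int rs, re;

            bitmap_zero(map, 128);
            bitmap_set(map, 8, 4);          /* bits 8..11 */
            bitmap_set(map, 32, 16);        /* bits 32..47 */

            bitmap_for_each_set_region(map, rs, re, 0, 128)
                    pr_info("set region [%u, %u)\n", rs, re);
            /* prints [8, 12) and then [32, 48) */
    }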
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value


@@ -85,6 +85,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
struct bpf_prog *replace_prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type);
@@ -93,7 +94,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
struct bpf_prog *replace_prog, enum bpf_attach_type type,
u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,


@@ -17,6 +17,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -43,6 +44,15 @@ struct bpf_map_ops {
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -106,6 +116,7 @@ struct bpf_map {
struct btf *btf;
struct bpf_map_memory memory;
char name[BPF_OBJ_NAME_LEN];
u32 btf_vmlinux_value_type_id;
bool unpriv_array;
bool frozen; /* write-once; write-protected by freeze_mutex */
/* 22 bytes hole */
@@ -183,7 +194,8 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
return map->btf && map->ops->map_seq_show_elem;
return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
map->ops->map_seq_show_elem;
}
int map_check_no_btf(const struct bpf_map *map,
@@ -349,6 +361,10 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id);
};
struct bpf_prog_offload_ops {
@@ -437,7 +453,8 @@ struct btf_func_model {
* fentry = a set of programs to run before calling original function
* fexit = a set of programs to run after original function
*/
int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
int arch_prepare_bpf_trampoline(void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_prog **fentry_progs, int fentry_cnt,
struct bpf_prog **fexit_progs, int fexit_cnt,
void *orig_call);
@@ -448,7 +465,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
BPF_TRAMP_MAX
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE, /* more than MAX */
};
struct bpf_trampoline {
@@ -463,6 +481,11 @@ struct bpf_trampoline {
void *addr;
bool ftrace_managed;
} func;
/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
* program by replacing one of its functions. func.addr is the address
* of the function it replaced.
*/
struct bpf_prog *extension_prog;
/* list of BPF programs using this trampoline */
struct hlist_head progs_hlist[BPF_TRAMP_MAX];
/* Number of attached programs. A counter per kind. */
@@ -471,11 +494,75 @@ struct bpf_trampoline {
void *image;
u64 selector;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
struct bpf_dispatcher_prog {
struct bpf_prog *prog;
refcount_t users;
};
struct bpf_dispatcher {
/* dispatcher mutex */
struct mutex mutex;
void *func;
struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
int num_progs;
void *image;
u32 image_off;
};
static __always_inline unsigned int bpf_dispatcher_nopfunc(
const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
const struct bpf_insn *))
{
return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(name) { \
.mutex = __MUTEX_INITIALIZER(name.mutex), \
.func = &name##func, \
.progs = {}, \
.num_progs = 0, \
.image = NULL, \
.image_off = 0 \
}
#define DEFINE_BPF_DISPATCHER(name) \
noinline unsigned int name##func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
unsigned int (*bpf_func)(const void *, \
const struct bpf_insn *)) \
{ \
return bpf_func(ctx, insnsi); \
} \
EXPORT_SYMBOL(name##func); \
struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
#define DECLARE_BPF_DISPATCHER(name) \
unsigned int name##func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
unsigned int (*bpf_func)(const void *, \
const struct bpf_insn *)); \
extern struct bpf_dispatcher name;
#define BPF_DISPATCHER_FUNC(name) name##func
#define BPF_DISPATCHER_PTR(name) (&name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
struct bpf_image {
struct latch_tree_node tnode;
unsigned char data[];
};
#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
bool is_bpf_image_address(unsigned long address);
void *bpf_image_alloc(void);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
@@ -490,9 +577,21 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
struct bpf_prog *from,
struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
return false;
}
#endif
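To illustrate the dispatcher API: a subsystem defines one dispatcher and runs its programs through it, turning the hot-path program invocation into a patched direct call instead of a retpoline-penalized indirect call. A sketch with illustrative my_* names (the real user added by this commit is the XDP dispatcher in linux/filter.h, where __BPF_PROG_RUN is defined):

    DEFINE_BPF_DISPATCHER(my_dispatcher)            /* illustrative name */

    static u32 my_run_prog(const struct bpf_prog *prog, const void *ctx)
    {
            /* Calls through the dispatcher trampoline, which is re-JITed
             * to branch directly to the attached program's bpf_func.
             */
            return __BPF_PROG_RUN(prog, ctx, BPF_DISPATCHER_FUNC(my_dispatcher));
    }

    static void my_switch_prog(struct bpf_prog *old, struct bpf_prog *new)
    {
            bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatcher),
                                       old, new);
    }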
struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
};
@@ -603,6 +702,73 @@ struct bpf_array_aux {
struct work_struct work;
};
struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
int (*check_member)(const struct btf_type *t,
const struct btf_member *member);
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
int (*reg)(void *kdata);
void (*unreg)(void *kdata);
const struct btf_type *type;
const struct btf_type *value_type;
const char *name;
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
u32 type_id;
u32 value_id;
};
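A rough sketch of how a subsystem fills this in; every my_* name is hypothetical (the first in-tree user is TCP congestion control), and the glue that registers the instance with the struct_ops machinery is elided:

    static int bpf_my_ops_init(struct btf *btf)
    {
            return 0;
    }

    static int bpf_my_ops_check_member(const struct btf_type *t,
                                       const struct btf_member *member)
    {
            return 0;       /* allow BPF to implement any member */
    }

    static int bpf_my_ops_reg(void *kdata)
    {
            return my_ops_register(kdata);          /* hypothetical hook */
    }

    static void bpf_my_ops_unreg(void *kdata)
    {
            my_ops_unregister(kdata);               /* hypothetical hook */
    }

    struct bpf_struct_ops bpf_my_ops = {
            .verifier_ops   = &bpf_my_ops_verifier_ops, /* hypothetical */
            .init           = bpf_my_ops_init,
            .check_member   = bpf_my_ops_check_member,
            .reg            = bpf_my_ops_reg,
            .unreg          = bpf_my_ops_unreg,
            .name           = "my_ops",
    };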
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
return bpf_struct_ops_get(data);
else
return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
bpf_struct_ops_put(data);
else
module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf) { }
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
{
return -EINVAL;
}
#endif
struct bpf_array {
struct bpf_map map;
u32 elem_size;
@@ -841,6 +1007,15 @@ void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
extern int sysctl_unprivileged_bpf_disabled;
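These helpers back the new BPF_MAP_*_BATCH syscall commands. A user-space sketch of one batched lookup, assuming the batch fields of union bpf_attr that were added alongside them (8-byte values and a capacity of 64 are illustrative):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int lookup_batch(int map_fd)
    {
            union bpf_attr attr = {};
            __u32 keys[64];
            __u64 values[64];
            __u64 out_batch = 0;

            attr.batch.map_fd    = map_fd;
            attr.batch.in_batch  = 0;       /* NULL token: start at the beginning */
            attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
            attr.batch.keys      = (__u64)(unsigned long)keys;
            attr.batch.values    = (__u64)(unsigned long)values;
            attr.batch.count     = 64;      /* in: capacity; out: entries copied */

            return syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
    }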
@@ -897,14 +1072,16 @@ struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(struct bpf_map *map);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
@@ -941,7 +1118,15 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
struct bpf_prog *bpf_prog_by_id(u32 id);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -1004,13 +1189,20 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
return NULL;
}
static inline void __dev_map_flush(struct bpf_map *map)
static inline void __dev_flush(void)
{
}
struct xdp_buff;
struct bpf_dtab_netdev;
static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
struct net_device *dev_rx)
{
return 0;
}
static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx)
@@ -1033,7 +1225,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
return NULL;
}
static inline void __cpu_map_flush(struct bpf_map *map)
static inline void __cpu_map_flush(void)
{
}
@@ -1074,6 +1266,11 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
static inline void bpf_map_put(struct bpf_map *map)
{
}
static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -1227,6 +1424,7 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);


@@ -65,6 +65,12 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
struct sk_reuseport_md, struct sk_reuseport_kern)
#endif
#if defined(CONFIG_BPF_JIT)
BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
void *, void *)
BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
void *, void *)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -105,3 +111,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
#if defined(CONFIG_BPF_JIT)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif


@@ -304,11 +304,13 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
bool prune_point;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
bool prune_point;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -379,6 +381,7 @@ struct bpf_verifier_env {
int *insn_stack;
int cur_stack;
} cfg;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
u32 prev_insn_processed, insn_processed;
@@ -428,4 +431,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
#endif /* _LINUX_BPF_VERIFIER_H */


@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <uapi/linux/btf.h>
#define BTF_TYPE_EMIT(type) ((void)(type *)0)
struct btf;
struct btf_member;
struct btf_type;
@@ -53,6 +55,22 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
u32 *type_size, const struct btf_type **elem_type,
u32 *total_nelems);
#define for_each_member(i, struct_type, member) \
for (i = 0, member = btf_type_member(struct_type); \
i < btf_type_vlen(struct_type); \
i++, member++)
static inline bool btf_type_is_ptr(const struct btf_type *t)
{
@@ -84,6 +102,40 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}
static inline u16 btf_type_vlen(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
}
static inline u16 btf_func_linkage(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
}
static inline bool btf_type_kflag(const struct btf_type *t)
{
return BTF_INFO_KFLAG(t->info);
}
static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
: member->offset;
}
static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
: 0;
}
static inline const struct btf_member *btf_type_member(const struct btf_type *t)
{
return (const struct btf_member *)(t + 1);
}
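A short sketch combining these accessors with for_each_member() (illustrative):

    static void dump_members(const struct btf *btf, const struct btf_type *t)
    {
            const struct btf_member *member;
            int i;

            for_each_member(i, t, member) {
                    pr_info("%s: bit offset %u, bitfield size %u\n",
                            btf_name_by_offset(btf, member->name_off),
                            btf_member_bit_offset(t, member),
                            btf_member_bitfield_size(t, member)); /* 0 unless bitfield */
            }
    }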
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);


@@ -7,7 +7,7 @@
* Copyright (C) 2013 Texas Instruments Inc.
* Contact: Eduardo Valentin <eduardo.valentin@ti.com>
*
* Highly based on cpu_cooling.c.
* Highly based on cpufreq_cooling.c.
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*/


@@ -19,7 +19,7 @@
struct cpufreq_policy;
#ifdef CONFIG_CPU_THERMAL
#ifdef CONFIG_CPU_FREQ_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @policy: cpufreq policy.
@@ -40,7 +40,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy);
#else /* !CONFIG_CPU_THERMAL */
#else /* !CONFIG_CPU_FREQ_THERMAL */
static inline struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
@@ -58,6 +58,24 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
#endif /* CONFIG_CPU_THERMAL */
#endif /* CONFIG_CPU_FREQ_THERMAL */
struct cpuidle_driver;
#ifdef CONFIG_CPU_IDLE_THERMAL
int cpuidle_cooling_register(struct cpuidle_driver *drv);
int cpuidle_of_cooling_register(struct device_node *np,
struct cpuidle_driver *drv);
#else /* CONFIG_CPU_IDLE_THERMAL */
static inline int cpuidle_cooling_register(struct cpuidle_driver *drv)
{
return 0;
}
static inline int cpuidle_of_cooling_register(struct device_node *np,
struct cpuidle_driver *drv)
{
return 0;
}
#endif /* CONFIG_CPU_IDLE_THERMAL */
#endif /* __CPU_COOLING_H__ */


@@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,


@@ -107,16 +107,9 @@
#define CRYPTO_TFM_NEED_KEY 0x00000001
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
/*
* Miscellaneous stuff.
@@ -570,7 +563,7 @@ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
reinit_completion(&wait->completion);
err = wait->err;
break;
};
}
return err;
}
@@ -584,9 +577,9 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
void crypto_unregister_algs(struct crypto_alg *algs, int count);
/*
* Algorithm query interface.
@@ -599,34 +592,10 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
* crypto_free_*(), as well as the various helpers below.
*/
struct cipher_tfm {
int (*cit_setkey)(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen);
void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen);
int (*cot_decompress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen);
};
#define crt_cipher crt_u.cipher
#define crt_compress crt_u.compress
struct crypto_tfm {
u32 crt_flags;
union {
struct cipher_tfm cipher;
struct compress_tfm compress;
} crt_u;
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -763,12 +732,6 @@ static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
return (struct crypto_cipher *)tfm;
}
static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return __crypto_cipher_cast(tfm);
}
/**
* crypto_alloc_cipher() - allocate single block cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -826,11 +789,6 @@ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
return &crypto_cipher_tfm(tfm)->crt_cipher;
}
/**
* crypto_cipher_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -884,12 +842,8 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen)
{
return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
key, keylen);
}
int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen);
/**
* crypto_cipher_encrypt_one() - encrypt one block of plaintext
@@ -900,12 +854,8 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
* Invoke the encryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
dst, src);
}
void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
/**
* crypto_cipher_decrypt_one() - decrypt one block of ciphertext
@@ -916,25 +866,14 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
* Invoke the decryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
dst, src);
}
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
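The single-block helpers keep their prototypes; only the inline bodies move out of line, so callers do not change. A usage sketch:

    static int aes_encrypt_block(const u8 *key, unsigned int keylen,
                                 const u8 *in, u8 *out)
    {
            struct crypto_cipher *tfm;
            int err;

            tfm = crypto_alloc_cipher("aes", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_cipher_setkey(tfm, key, keylen);
            if (!err)
                    crypto_cipher_encrypt_one(tfm, out, in); /* one 16-byte block */

            crypto_free_cipher(tfm);
            return err;
    }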
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
}
static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
CRYPTO_ALG_TYPE_MASK);
return __crypto_comp_cast(tfm);
}
static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
{
@@ -969,26 +908,13 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm)
return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
return &crypto_comp_tfm(tfm)->crt_compress;
}
int crypto_comp_compress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen);
static inline int crypto_comp_compress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
src, slen, dst, dlen);
}
static inline int crypto_comp_decompress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
src, slen, dst, dlen);
}
int crypto_comp_decompress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen);
#endif /* _LINUX_CRYPTO_H */


@@ -37,8 +37,6 @@
* the structure defined in struct sja1105_private.
*/
struct sja1105_tagger_data {
struct sk_buff_head skb_rxtstamp_queue;
struct work_struct rxtstamp_work;
struct sk_buff *stampable_skb;
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
@@ -55,10 +53,12 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
struct sja1105_port {
struct kthread_worker *xmit_worker;
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
int mgmt_slot;
};
#endif /* _NET_DSA_SJA1105_H */


@@ -48,6 +48,27 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
#if defined(CONFIG_X86_64)
#define __efiapi __attribute__((ms_abi))
#elif defined(CONFIG_X86_32)
#define __efiapi __attribute__((regparm(0)))
#else
#define __efiapi
#endif
#define efi_get_handle_at(array, idx) \
(efi_is_native() ? (array)[idx] \
: (efi_handle_t)(unsigned long)((u32 *)(array))[idx])
#define efi_get_handle_num(size) \
((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
#define for_each_efi_handle(handle, array, size, i) \
for (i = 0; \
i < efi_get_handle_num(size) && \
((handle = efi_get_handle_at((array), i)) || true); \
i++)
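A sketch of the iteration helper in use, with handles/size as returned by the boot services' locate_handle(); in mixed mode the buffer holds u32 handles, which the accessors above paper over:

    static void walk_handles(efi_handle_t *handles, unsigned long size)
    {
            efi_handle_t handle;
            int i;

            for_each_efi_handle(handle, handles, size, i) {
                    /* handle is a native efi_handle_t in either mode */
            }
    }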
/*
* The UEFI spec and EDK2 reference implementation both define EFI_GUID as
struct { u32 a; u16 b; u16 c; u8 d[8]; }; and so the implied alignment
@@ -251,106 +272,71 @@ typedef struct {
u32 create_event_ex;
} __packed efi_boot_services_32_t;
typedef struct {
efi_table_hdr_t hdr;
u64 raise_tpl;
u64 restore_tpl;
u64 allocate_pages;
u64 free_pages;
u64 get_memory_map;
u64 allocate_pool;
u64 free_pool;
u64 create_event;
u64 set_timer;
u64 wait_for_event;
u64 signal_event;
u64 close_event;
u64 check_event;
u64 install_protocol_interface;
u64 reinstall_protocol_interface;
u64 uninstall_protocol_interface;
u64 handle_protocol;
u64 __reserved;
u64 register_protocol_notify;
u64 locate_handle;
u64 locate_device_path;
u64 install_configuration_table;
u64 load_image;
u64 start_image;
u64 exit;
u64 unload_image;
u64 exit_boot_services;
u64 get_next_monotonic_count;
u64 stall;
u64 set_watchdog_timer;
u64 connect_controller;
u64 disconnect_controller;
u64 open_protocol;
u64 close_protocol;
u64 open_protocol_information;
u64 protocols_per_handle;
u64 locate_handle_buffer;
u64 locate_protocol;
u64 install_multiple_protocol_interfaces;
u64 uninstall_multiple_protocol_interfaces;
u64 calculate_crc32;
u64 copy_mem;
u64 set_mem;
u64 create_event_ex;
} __packed efi_boot_services_64_t;
/*
* EFI Boot Services table
*/
typedef struct {
efi_table_hdr_t hdr;
void *raise_tpl;
void *restore_tpl;
efi_status_t (*allocate_pages)(int, int, unsigned long,
efi_physical_addr_t *);
efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
unsigned long *, u32 *);
efi_status_t (*allocate_pool)(int, unsigned long, void **);
efi_status_t (*free_pool)(void *);
void *create_event;
void *set_timer;
void *wait_for_event;
void *signal_event;
void *close_event;
void *check_event;
void *install_protocol_interface;
void *reinstall_protocol_interface;
void *uninstall_protocol_interface;
efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
void *__reserved;
void *register_protocol_notify;
efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
unsigned long *, efi_handle_t *);
void *locate_device_path;
efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
void *load_image;
void *start_image;
void *exit;
void *unload_image;
efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
void *get_next_monotonic_count;
void *stall;
void *set_watchdog_timer;
void *connect_controller;
void *disconnect_controller;
void *open_protocol;
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
void *locate_handle_buffer;
efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
void *install_multiple_protocol_interfaces;
void *uninstall_multiple_protocol_interfaces;
void *calculate_crc32;
void *copy_mem;
void *set_mem;
void *create_event_ex;
typedef union {
struct {
efi_table_hdr_t hdr;
void *raise_tpl;
void *restore_tpl;
efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long,
efi_physical_addr_t *);
efi_status_t (__efiapi *free_pages)(efi_physical_addr_t,
unsigned long);
efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *,
unsigned long *,
unsigned long *, u32 *);
efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
void **);
efi_status_t (__efiapi *free_pool)(void *);
void *create_event;
void *set_timer;
void *wait_for_event;
void *signal_event;
void *close_event;
void *check_event;
void *install_protocol_interface;
void *reinstall_protocol_interface;
void *uninstall_protocol_interface;
efi_status_t (__efiapi *handle_protocol)(efi_handle_t,
efi_guid_t *, void **);
void *__reserved;
void *register_protocol_notify;
efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *,
void *, unsigned long *,
efi_handle_t *);
void *locate_device_path;
efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
void *);
void *load_image;
void *start_image;
void *exit;
void *unload_image;
efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
unsigned long);
void *get_next_monotonic_count;
void *stall;
void *set_watchdog_timer;
void *connect_controller;
efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
efi_handle_t,
efi_handle_t);
void *open_protocol;
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
void *locate_handle_buffer;
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
void *install_multiple_protocol_interfaces;
void *uninstall_multiple_protocol_interfaces;
void *calculate_crc32;
void *copy_mem;
void *set_mem;
void *create_event_ex;
};
efi_boot_services_32_t mixed_mode;
} efi_boot_services_t;
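The union gives two views of one table: the anonymous struct carries the native, __efiapi-annotated prototypes, while mixed_mode overlays the 32-bit layout so 64-bit stub code can read the u32 entry points. A hand-rolled sketch of the resulting dispatch (efi_is_native() and efi64_thunk() name the x86 mechanisms; treat the details as illustrative):

    static efi_status_t alloc_pool(efi_system_table_t *systab,
                                   unsigned long size, void **buf)
    {
            if (efi_is_native())
                    return systab->boottime->allocate_pool(EFI_LOADER_DATA,
                                                           size, buf);

            /* Mixed mode: a u32 entry point, reachable only via a thunk. */
            return efi64_thunk(systab->boottime->mixed_mode.allocate_pool,
                               EFI_LOADER_DATA, size, buf);
    }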
typedef enum {
@@ -383,10 +369,14 @@ typedef struct {
u32 write;
} efi_pci_io_protocol_access_32_t;
typedef struct {
u64 read;
u64 write;
} efi_pci_io_protocol_access_64_t;
typedef union efi_pci_io_protocol efi_pci_io_protocol_t;
typedef
efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *,
EFI_PCI_IO_PROTOCOL_WIDTH,
u32 offset,
unsigned long count,
void *buffer);
typedef struct {
void *read;
@@ -394,64 +384,54 @@ typedef struct {
} efi_pci_io_protocol_access_t;
typedef struct {
u32 poll_mem;
u32 poll_io;
efi_pci_io_protocol_access_32_t mem;
efi_pci_io_protocol_access_32_t io;
efi_pci_io_protocol_access_32_t pci;
u32 copy_mem;
u32 map;
u32 unmap;
u32 allocate_buffer;
u32 free_buffer;
u32 flush;
u32 get_location;
u32 attributes;
u32 get_bar_attributes;
u32 set_bar_attributes;
u64 romsize;
u32 romimage;
} efi_pci_io_protocol_32_t;
efi_pci_io_protocol_cfg_t read;
efi_pci_io_protocol_cfg_t write;
} efi_pci_io_protocol_config_access_t;
typedef struct {
u64 poll_mem;
u64 poll_io;
efi_pci_io_protocol_access_64_t mem;
efi_pci_io_protocol_access_64_t io;
efi_pci_io_protocol_access_64_t pci;
u64 copy_mem;
u64 map;
u64 unmap;
u64 allocate_buffer;
u64 free_buffer;
u64 flush;
u64 get_location;
u64 attributes;
u64 get_bar_attributes;
u64 set_bar_attributes;
u64 romsize;
u64 romimage;
} efi_pci_io_protocol_64_t;
typedef struct {
void *poll_mem;
void *poll_io;
efi_pci_io_protocol_access_t mem;
efi_pci_io_protocol_access_t io;
efi_pci_io_protocol_access_t pci;
void *copy_mem;
void *map;
void *unmap;
void *allocate_buffer;
void *free_buffer;
void *flush;
void *get_location;
void *attributes;
void *get_bar_attributes;
void *set_bar_attributes;
uint64_t romsize;
void *romimage;
} efi_pci_io_protocol_t;
union efi_pci_io_protocol {
struct {
void *poll_mem;
void *poll_io;
efi_pci_io_protocol_access_t mem;
efi_pci_io_protocol_access_t io;
efi_pci_io_protocol_config_access_t pci;
void *copy_mem;
void *map;
void *unmap;
void *allocate_buffer;
void *free_buffer;
void *flush;
efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *,
unsigned long *segment_nr,
unsigned long *bus_nr,
unsigned long *device_nr,
unsigned long *func_nr);
void *attributes;
void *get_bar_attributes;
void *set_bar_attributes;
uint64_t romsize;
void *romimage;
};
struct {
u32 poll_mem;
u32 poll_io;
efi_pci_io_protocol_access_32_t mem;
efi_pci_io_protocol_access_32_t io;
efi_pci_io_protocol_access_32_t pci;
u32 copy_mem;
u32 map;
u32 unmap;
u32 allocate_buffer;
u32 free_buffer;
u32 flush;
u32 get_location;
u32 attributes;
u32 get_bar_attributes;
u32 set_bar_attributes;
u64 romsize;
u32 romimage;
} mixed_mode;
};
#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
@@ -473,54 +453,62 @@ typedef struct {
#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
typedef struct {
u32 version;
u32 get;
u32 set;
u32 del;
u32 get_all;
} apple_properties_protocol_32_t;
struct efi_dev_path;
typedef struct {
u64 version;
u64 get;
u64 set;
u64 del;
u64 get_all;
} apple_properties_protocol_64_t;
typedef union apple_properties_protocol apple_properties_protocol_t;
typedef struct {
u32 get_capability;
u32 get_event_log;
u32 hash_log_extend_event;
u32 submit_command;
u32 get_active_pcr_banks;
u32 set_active_pcr_banks;
u32 get_result_of_set_active_pcr_banks;
} efi_tcg2_protocol_32_t;
typedef struct {
u64 get_capability;
u64 get_event_log;
u64 hash_log_extend_event;
u64 submit_command;
u64 get_active_pcr_banks;
u64 set_active_pcr_banks;
u64 get_result_of_set_active_pcr_banks;
} efi_tcg2_protocol_64_t;
union apple_properties_protocol {
struct {
unsigned long version;
efi_status_t (__efiapi *get)(apple_properties_protocol_t *,
struct efi_dev_path *,
efi_char16_t *, void *, u32 *);
efi_status_t (__efiapi *set)(apple_properties_protocol_t *,
struct efi_dev_path *,
efi_char16_t *, void *, u32);
efi_status_t (__efiapi *del)(apple_properties_protocol_t *,
struct efi_dev_path *,
efi_char16_t *);
efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *,
void *buffer, u32 *);
};
struct {
u32 version;
u32 get;
u32 set;
u32 del;
u32 get_all;
} mixed_mode;
};
typedef u32 efi_tcg2_event_log_format;
typedef struct {
void *get_capability;
efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
void *hash_log_extend_event;
void *submit_command;
void *get_active_pcr_banks;
void *set_active_pcr_banks;
void *get_result_of_set_active_pcr_banks;
} efi_tcg2_protocol_t;
typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
union efi_tcg2_protocol {
struct {
void *get_capability;
efi_status_t (__efiapi *get_event_log)(efi_handle_t,
efi_tcg2_event_log_format,
efi_physical_addr_t *,
efi_physical_addr_t *,
efi_bool_t *);
void *hash_log_extend_event;
void *submit_command;
void *get_active_pcr_banks;
void *set_active_pcr_banks;
void *get_result_of_set_active_pcr_banks;
};
struct {
u32 get_capability;
u32 get_event_log;
u32 hash_log_extend_event;
u32 submit_command;
u32 get_active_pcr_banks;
u32 set_active_pcr_banks;
u32 get_result_of_set_active_pcr_banks;
} mixed_mode;
};
/*
* Types and defines for EFI ResetSystem
@@ -553,24 +541,6 @@ typedef struct {
u32 query_variable_info;
} efi_runtime_services_32_t;
typedef struct {
efi_table_hdr_t hdr;
u64 get_time;
u64 set_time;
u64 get_wakeup_time;
u64 set_wakeup_time;
u64 set_virtual_address_map;
u64 convert_pointer;
u64 get_variable;
u64 get_next_variable;
u64 set_variable;
u64 get_next_high_mono_count;
u64 reset_system;
u64 update_capsule;
u64 query_capsule_caps;
u64 query_variable_info;
} efi_runtime_services_64_t;
typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
typedef efi_status_t efi_set_time_t (efi_time_t *tm);
typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -605,22 +575,25 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
unsigned long size,
bool nonblocking);
typedef struct {
efi_table_hdr_t hdr;
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
efi_set_wakeup_time_t *set_wakeup_time;
efi_set_virtual_address_map_t *set_virtual_address_map;
void *convert_pointer;
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_update_capsule_t *update_capsule;
efi_query_capsule_caps_t *query_capsule_caps;
efi_query_variable_info_t *query_variable_info;
typedef union {
struct {
efi_table_hdr_t hdr;
efi_get_time_t __efiapi *get_time;
efi_set_time_t __efiapi *set_time;
efi_get_wakeup_time_t __efiapi *get_wakeup_time;
efi_set_wakeup_time_t __efiapi *set_wakeup_time;
efi_set_virtual_address_map_t __efiapi *set_virtual_address_map;
void *convert_pointer;
efi_get_variable_t __efiapi *get_variable;
efi_get_next_variable_t __efiapi *get_next_variable;
efi_set_variable_t __efiapi *set_variable;
efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count;
efi_reset_system_t __efiapi *reset_system;
efi_update_capsule_t __efiapi *update_capsule;
efi_query_capsule_caps_t __efiapi *query_capsule_caps;
efi_query_variable_info_t __efiapi *query_variable_info;
};
efi_runtime_services_32_t mixed_mode;
} efi_runtime_services_t;
void efi_native_runtime_setup(void);
@@ -706,9 +679,12 @@ typedef struct {
u32 table;
} efi_config_table_32_t;
typedef struct {
efi_guid_t guid;
unsigned long table;
typedef union {
struct {
efi_guid_t guid;
void *table;
};
efi_config_table_32_t mixed_mode;
} efi_config_table_t;
typedef struct {
@@ -760,32 +736,38 @@ typedef struct {
u32 tables;
} efi_system_table_32_t;
typedef struct {
efi_table_hdr_t hdr;
unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
u32 fw_revision;
unsigned long con_in_handle;
unsigned long con_in;
unsigned long con_out_handle;
unsigned long con_out;
unsigned long stderr_handle;
unsigned long stderr;
efi_runtime_services_t *runtime;
efi_boot_services_t *boottime;
unsigned long nr_tables;
unsigned long tables;
typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
typedef union {
struct {
efi_table_hdr_t hdr;
unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
u32 fw_revision;
unsigned long con_in_handle;
unsigned long con_in;
unsigned long con_out_handle;
efi_simple_text_output_protocol_t *con_out;
unsigned long stderr_handle;
unsigned long stderr;
efi_runtime_services_t *runtime;
efi_boot_services_t *boottime;
unsigned long nr_tables;
unsigned long tables;
};
efi_system_table_32_t mixed_mode;
} efi_system_table_t;
/*
* Architecture independent structure for describing a memory map for the
* benefit of efi_memmap_init_early(), saving us the need to pass four
* parameters.
* benefit of efi_memmap_init_early(), and for passing context between
* efi_memmap_alloc() and efi_memmap_install().
*/
struct efi_memory_map_data {
phys_addr_t phys_map;
unsigned long size;
unsigned long desc_version;
unsigned long desc_size;
unsigned long flags;
};
struct efi_memory_map {
@@ -795,7 +777,10 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
bool late;
#define EFI_MEMMAP_LATE (1UL << 0)
#define EFI_MEMMAP_MEMBLOCK (1UL << 1)
#define EFI_MEMMAP_SLAB (1UL << 2)
unsigned long flags;
};
struct efi_mem_range {
@@ -811,38 +796,6 @@ struct efi_fdt_params {
u32 desc_ver;
};
typedef struct {
u32 revision;
u32 parent_handle;
u32 system_table;
u32 device_handle;
u32 file_path;
u32 reserved;
u32 load_options_size;
u32 load_options;
u32 image_base;
__aligned_u64 image_size;
unsigned int image_code_type;
unsigned int image_data_type;
u32 unload;
} efi_loaded_image_32_t;
typedef struct {
u32 revision;
u64 parent_handle;
u64 system_table;
u64 device_handle;
u64 file_path;
u64 reserved;
u32 load_options_size;
u64 load_options;
u64 image_base;
__aligned_u64 image_size;
unsigned int image_code_type;
unsigned int image_data_type;
u64 unload;
} efi_loaded_image_64_t;
typedef struct {
u32 revision;
efi_handle_t parent_handle;
@@ -856,10 +809,9 @@ typedef struct {
__aligned_u64 image_size;
unsigned int image_code_type;
unsigned int image_data_type;
efi_status_t (*unload)(efi_handle_t image_handle);
efi_status_t ( __efiapi *unload)(efi_handle_t image_handle);
} efi_loaded_image_t;
typedef struct {
u64 size;
u64 file_size;
@@ -871,67 +823,34 @@ typedef struct {
efi_char16_t filename[1];
} efi_file_info_t;
typedef struct {
u64 revision;
u32 open;
u32 close;
u32 delete;
u32 read;
u32 write;
u32 get_position;
u32 set_position;
u32 get_info;
u32 set_info;
u32 flush;
} efi_file_handle_32_t;
typedef struct efi_file_handle efi_file_handle_t;
typedef struct {
struct efi_file_handle {
u64 revision;
u64 open;
u64 close;
u64 delete;
u64 read;
u64 write;
u64 get_position;
u64 set_position;
u64 get_info;
u64 set_info;
u64 flush;
} efi_file_handle_64_t;
typedef struct _efi_file_handle {
u64 revision;
efi_status_t (*open)(struct _efi_file_handle *,
struct _efi_file_handle **,
efi_char16_t *, u64, u64);
efi_status_t (*close)(struct _efi_file_handle *);
efi_status_t (__efiapi *open)(efi_file_handle_t *,
efi_file_handle_t **,
efi_char16_t *, u64, u64);
efi_status_t (__efiapi *close)(efi_file_handle_t *);
void *delete;
efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
void *);
efi_status_t (__efiapi *read)(efi_file_handle_t *,
unsigned long *, void *);
void *write;
void *get_position;
void *set_position;
efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
unsigned long *, void *);
efi_status_t (__efiapi *get_info)(efi_file_handle_t *,
efi_guid_t *, unsigned long *,
void *);
void *set_info;
void *flush;
} efi_file_handle_t;
};
typedef struct {
u64 revision;
u32 open_volume;
} efi_file_io_interface_32_t;
typedef struct efi_file_io_interface efi_file_io_interface_t;
typedef struct {
struct efi_file_io_interface {
u64 revision;
u64 open_volume;
} efi_file_io_interface_64_t;
typedef struct _efi_file_io_interface {
u64 revision;
int (*open_volume)(struct _efi_file_io_interface *,
efi_file_handle_t **);
} efi_file_io_interface_t;
int (__efiapi *open_volume)(efi_file_io_interface_t *,
efi_file_handle_t **);
};
#define EFI_FILE_MODE_READ 0x0000000000000001
#define EFI_FILE_MODE_WRITE 0x0000000000000002
@@ -1015,7 +934,6 @@ extern struct efi {
efi_query_capsule_caps_t *query_capsule_caps;
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
struct efi_memory_map memmap;
unsigned long flags;
} efi;
@@ -1056,11 +974,14 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
extern int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data);
extern void __efi_memmap_free(u64 phys, unsigned long size,
unsigned long flags);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
struct range *range);
extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
@@ -1391,22 +1312,18 @@ struct efivar_entry {
bool deleting;
};
typedef struct {
u32 reset;
u32 output_string;
u32 test_string;
} efi_simple_text_output_protocol_32_t;
typedef struct {
u64 reset;
u64 output_string;
u64 test_string;
} efi_simple_text_output_protocol_64_t;
struct efi_simple_text_output_protocol {
void *reset;
efi_status_t (*output_string)(void *, void *);
void *test_string;
union efi_simple_text_output_protocol {
struct {
void *reset;
efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *,
efi_char16_t *);
void *test_string;
};
struct {
u32 reset;
u32 output_string;
u32 test_string;
} mixed_mode;
};
#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
@@ -1415,74 +1332,60 @@ struct efi_simple_text_output_protocol {
#define PIXEL_BLT_ONLY 3
#define PIXEL_FORMAT_MAX 4
struct efi_pixel_bitmask {
typedef struct {
u32 red_mask;
u32 green_mask;
u32 blue_mask;
u32 reserved_mask;
};
} efi_pixel_bitmask_t;
struct efi_graphics_output_mode_info {
typedef struct {
u32 version;
u32 horizontal_resolution;
u32 vertical_resolution;
int pixel_format;
struct efi_pixel_bitmask pixel_information;
efi_pixel_bitmask_t pixel_information;
u32 pixels_per_scan_line;
} __packed;
} efi_graphics_output_mode_info_t;
struct efi_graphics_output_protocol_mode_32 {
u32 max_mode;
u32 mode;
u32 info;
u32 size_of_info;
u64 frame_buffer_base;
u32 frame_buffer_size;
} __packed;
typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t;
struct efi_graphics_output_protocol_mode_64 {
u32 max_mode;
u32 mode;
u64 info;
u64 size_of_info;
u64 frame_buffer_base;
u64 frame_buffer_size;
} __packed;
struct efi_graphics_output_protocol_mode {
u32 max_mode;
u32 mode;
unsigned long info;
unsigned long size_of_info;
u64 frame_buffer_base;
unsigned long frame_buffer_size;
} __packed;
struct efi_graphics_output_protocol_32 {
u32 query_mode;
u32 set_mode;
u32 blt;
u32 mode;
union efi_graphics_output_protocol_mode {
struct {
u32 max_mode;
u32 mode;
efi_graphics_output_mode_info_t *info;
unsigned long size_of_info;
efi_physical_addr_t frame_buffer_base;
unsigned long frame_buffer_size;
};
struct {
u32 max_mode;
u32 mode;
u32 info;
u32 size_of_info;
u64 frame_buffer_base;
u32 frame_buffer_size;
} mixed_mode;
};
struct efi_graphics_output_protocol_64 {
u64 query_mode;
u64 set_mode;
u64 blt;
u64 mode;
};
typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
struct efi_graphics_output_protocol {
unsigned long query_mode;
unsigned long set_mode;
unsigned long blt;
struct efi_graphics_output_protocol_mode *mode;
union efi_graphics_output_protocol {
struct {
void *query_mode;
void *set_mode;
void *blt;
efi_graphics_output_protocol_mode_t *mode;
};
struct {
u32 query_mode;
u32 set_mode;
u32 blt;
u32 mode;
} mixed_mode;
};
typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
struct efi_graphics_output_protocol *, u32, unsigned long *,
struct efi_graphics_output_mode_info **);
extern struct list_head efivar_sysfs_list;
static inline void
@@ -1582,24 +1485,19 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
/* prototypes shared between arch specific and generic stub code */
void efi_printk(efi_system_table_t *sys_table_arg, char *str);
void efi_printk(char *str);
void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
unsigned long addr);
void efi_free(unsigned long size, unsigned long addr);
char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image, int *cmd_line_len);
char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map);
efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min);
static inline
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
efi_status_t efi_low_alloc(unsigned long size, unsigned long align,
unsigned long *addr)
{
/*
@@ -1607,23 +1505,20 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
* checks pointers against NULL. Skip the first 8
* bytes so we start at a nice even number.
*/
return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
return efi_low_alloc_above(size, align, addr, 0x8);
}
efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max);
efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
unsigned long *image_addr,
efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
unsigned long alignment,
unsigned long min_addr);
efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image,
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
char *cmd_line, char *option_string,
unsigned long max_addr,
unsigned long *load_addr,
@@ -1631,8 +1526,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
efi_status_t efi_parse_options(char const *cmdline);
efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size);
#ifdef CONFIG_EFI
@@ -1650,18 +1544,18 @@ enum efi_secureboot_mode {
efi_secureboot_mode_disabled,
efi_secureboot_mode_enabled,
};
enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
enum efi_secureboot_mode efi_get_secureboot(void);
#ifdef CONFIG_RESET_ATTACK_MITIGATION
void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg);
void efi_enable_reset_attack_mitigation(void);
#else
static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
efi_enable_reset_attack_mitigation(void) { }
#endif
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
efi_status_t efi_random_get_seed(void);
void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
void efi_retrieve_tpm2_eventlog(void);
/*
* Arch code can implement the following three template macros, avoiding
@@ -1713,12 +1607,10 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
})
typedef efi_status_t (*efi_exit_boot_map_processing)(
efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv);
efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
void *handle,
efi_status_t efi_exit_boot_services(void *handle,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func);
@@ -1809,4 +1701,6 @@ struct linux_efi_memreserve {
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
/ sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
void efi_pci_disable_bridge_busmaster(void);
#endif /* _LINUX_EFI_H */


@@ -43,7 +43,6 @@ __be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
int eth_change_mtu(struct net_device *dev, int new_mtu);
int eth_validate_addr(struct net_device *dev);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,


@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_ETHTOOL_NETLINK_H_
#define _LINUX_ETHTOOL_NETLINK_H_
#include <uapi/linux/ethtool_netlink.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
enum ethtool_multicast_groups {
ETHNL_MCGRP_MONITOR,
};
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */


@@ -559,23 +559,26 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
#define BPF_PROG_RUN(prog, ctx) ({ \
u32 ret; \
cant_sleep(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
struct bpf_prog_stats *stats; \
u64 start = sched_clock(); \
ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
stats = this_cpu_ptr(prog->aux->stats); \
u64_stats_update_begin(&stats->syncp); \
stats->cnt++; \
stats->nsecs += sched_clock() - start; \
u64_stats_update_end(&stats->syncp); \
} else { \
ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
} \
#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
u32 ret; \
cant_sleep(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
struct bpf_prog_stats *stats; \
u64 start = sched_clock(); \
ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
stats = this_cpu_ptr(prog->aux->stats); \
u64_stats_update_begin(&stats->syncp); \
stats->cnt++; \
stats->nsecs += sched_clock() - start; \
u64_stats_update_end(&stats->syncp); \
} else { \
ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
} \
ret; })
#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
bpf_dispatcher_nopfunc)
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
@@ -589,7 +592,6 @@ struct bpf_redirect_info {
u32 tgt_index;
void *tgt_value;
struct bpf_map *map;
struct bpf_map *map_to_flush;
u32 kern_flags;
};
@@ -699,6 +701,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return res;
}
DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@@ -708,9 +712,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* already takes rcu_read_lock() when fetching the program, so
* it's not necessary here anymore.
*/
return BPF_PROG_RUN(prog, xdp);
return __BPF_PROG_RUN(prog, xdp,
BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
@@ -836,6 +843,8 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -909,7 +918,7 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
return 0;
}
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
@@ -920,7 +929,13 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
void xdp_do_flush_map(void);
void xdp_do_flush(void);
/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
* it is no longer only flushing maps. Keep this define for compatibility
* until all drivers are updated - do not use xdp_do_flush_map() in new code!
*/
#define xdp_do_flush_map xdp_do_flush
void bpf_warn_invalid_xdp_action(u32 act);
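To make the pairing rule above concrete, a minimal sketch of a NAPI poll loop, assuming hypothetical mydrv_* ring helpers (everything except the xdp_* and bpf_* calls is invented for illustration):

/* hypothetical driver fragment; assumes <linux/filter.h> and <linux/netdevice.h> */
static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
	struct bpf_prog *prog = READ_ONCE(ring->xdp_prog);
	int done = 0;

	while (done < budget && mydrv_rx_pending(ring)) {
		struct xdp_buff xdp;

		mydrv_fill_xdp_buff(ring, &xdp);	/* hypothetical */
		if (bpf_prog_run_xdp(prog, &xdp) == XDP_REDIRECT)
			xdp_do_redirect(ring->netdev, &xdp, prog);
		done++;
	}
	xdp_do_flush();	/* once, same CPU context; new code should not use xdp_do_flush_map() */
	return done;
}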

View File

@@ -855,7 +855,7 @@ static inline loff_t i_size_read(const struct inode *inode)
i_size = inode->i_size;
} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
return i_size;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
loff_t i_size;
preempt_disable();
@@ -880,7 +880,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
inode->i_size = i_size;
preempt_enable();

View File

@@ -72,6 +72,21 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
return READ_ONCE(inode->i_crypt_info) != NULL;
}
/**
* fscrypt_needs_contents_encryption() - check whether an inode needs
* contents encryption
*
* Return: %true iff the inode is an encrypted regular file and the kernel was
* built with fscrypt support.
*
* If you need to know whether the encrypt bit is set even when the kernel was
* built without fscrypt support, you must use IS_ENCRYPTED() directly instead.
*/
static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
{
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
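As a usage sketch (hypothetical filesystem hook, not part of this patch), the helper keeps data-path code from open-coding the IS_ENCRYPTED() && S_ISREG() pair:

/* hypothetical example */
static void myfs_read_end_io(struct inode *inode, struct bio *bio)
{
	if (fscrypt_needs_contents_encryption(inode))
		fscrypt_decrypt_bio(bio);	/* declared later in this header */
}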
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return inode->i_sb->s_cop->dummy_context &&
@@ -153,82 +168,14 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
/* Extracts the second-to-last ciphertext block; see explanation below */
#define FSCRYPT_FNAME_DIGEST(name, len) \
((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
FS_CRYPTO_BLOCK_SIZE))
#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
/**
* fscrypt_digested_name - alternate identifier for an on-disk filename
*
* When userspace lists an encrypted directory without access to the key,
* filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
* bytes are shown in this abbreviated form (base64-encoded) rather than as the
* full ciphertext (base64-encoded). This is necessary to allow supporting
* filenames up to NAME_MAX bytes, since base64 encoding expands the length.
*
* To make it possible for filesystems to still find the correct directory entry
* despite not knowing the full on-disk name, we encode any filesystem-specific
* 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
* followed by the second-to-last ciphertext block of the filename. Due to the
* use of the CBC-CTS encryption mode, the second-to-last ciphertext block
* depends on the full plaintext. (Note that ciphertext stealing causes the
* last two blocks to appear "flipped".) This makes accidental collisions very
* unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
* share the same filesystem-specific hashes.
*
* However, this scheme isn't immune to intentional collisions, which can be
* created by anyone able to create arbitrary plaintext filenames and view them
* without the key. Making the "digest" be a real cryptographic hash like
* SHA-256 over the full ciphertext would prevent this, although it would be
* less efficient and harder to implement, especially since the filesystem would
* need to calculate it for each directory entry examined during a search.
*/
struct fscrypt_digested_name {
u32 hash;
u32 minor_hash;
u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
};
/**
* fscrypt_match_name() - test whether the given name matches a directory entry
* @fname: the name being searched for
* @de_name: the name from the directory entry
* @de_name_len: the length of @de_name in bytes
*
* Normally @fname->disk_name will be set, and in that case we simply compare
* that to the name stored in the directory entry. The only exception is that
* if we don't have the key for an encrypted directory and a filename in it is
* very long, then we won't have the full disk_name and we'll instead need to
* match against the fscrypt_digested_name.
*
* Return: %true if the name matches, otherwise %false.
*/
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
if (unlikely(!fname->disk_name.name)) {
const struct fscrypt_digested_name *n =
(const void *)fname->crypto_buf.name;
if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
return false;
if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
return false;
return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
}
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
extern int fscrypt_fname_disk_to_usr(const struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname);
extern bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len);
extern u64 fscrypt_fname_siphash(const struct inode *dir,
const struct qstr *name);
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
@@ -246,6 +193,8 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname);
extern int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
@@ -267,6 +216,11 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
return false;
}
static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
{
return false;
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return false;
@@ -438,7 +392,7 @@ static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
return;
}
static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
static inline int fscrypt_fname_disk_to_usr(const struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
@@ -455,6 +409,13 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
static inline u64 fscrypt_fname_siphash(const struct inode *dir,
const struct qstr *name)
{
WARN_ON_ONCE(1);
return 0;
}
/* bio.c */
static inline void fscrypt_decrypt_bio(struct bio *bio)
{
@@ -497,6 +458,13 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
static inline int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags,
unsigned int flags)
{
return 0;
}
static inline int __fscrypt_prepare_symlink(struct inode *dir,
unsigned int len,
unsigned int max_len,

View File

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2019 NXP */
#ifndef _FSL_ENETC_MDIO_H_
#define _FSL_ENETC_MDIO_H_
#include <linux/phy.h>
/* PCS registers */
#define ENETC_PCS_LINK_TIMER1 0x12
#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0
#define ENETC_PCS_LINK_TIMER2 0x13
#define ENETC_PCS_LINK_TIMER2_VAL 0x0003
#define ENETC_PCS_IF_MODE 0x14
#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
* Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
* still thinks it's at gigabit.
*/
enum enetc_pcs_speed {
ENETC_PCS_SPEED_10 = 0,
ENETC_PCS_SPEED_100 = 1,
ENETC_PCS_SPEED_1000 = 2,
ENETC_PCS_SPEED_2500 = 2,
};
struct enetc_hw;
struct enetc_mdio_priv {
struct enetc_hw *hw;
int mdio_base;
};
#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
#else
static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{ return -EINVAL; }
static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
u16 value)
{ return -EINVAL; }
static inline struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif
#endif

View File

@@ -192,6 +192,7 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts);
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
#ifdef CONFIG_DEBUG_FS
void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);

View File

@@ -77,6 +77,10 @@ struct fsverity_operations {
*
* @inode: the inode
* @index: 0-based index of the page within the Merkle tree
* @num_ra_pages: The number of Merkle tree pages that should be
* prefetched starting at @index if the page at @index
* isn't already cached. Implementations may ignore this
* argument; it's only a performance optimization.
*
* This can be called at any time on an open verity file, as well as
* between ->begin_enable_verity() and ->end_enable_verity(). It may be
@@ -87,7 +91,8 @@ struct fsverity_operations {
* Return: the page on success, ERR_PTR() on failure
*/
struct page *(*read_merkle_tree_page)(struct inode *inode,
pgoff_t index);
pgoff_t index,
unsigned long num_ra_pages);
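A sketch of how a filesystem might implement the extended hook, assuming a hypothetical layout where the Merkle tree pages are reachable through i_mapping; per the comment above, the readahead hint may simply be ignored:

/* hypothetical implementation; myfs_merkle_index() is invented */
static struct page *myfs_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
{
	return read_mapping_page(inode->i_mapping,
				 myfs_merkle_index(inode, index), NULL);
}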
/**
* Write a Merkle tree block to the given inode.

View File

@@ -730,7 +730,7 @@ static inline void hd_free_part(struct hd_struct *part)
* accessor function.
*
* Code written along the lines of i_size_read() and i_size_write().
* CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
* CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
* on.
*/
static inline sector_t part_nr_sects_read(struct hd_struct *part)
@@ -743,7 +743,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
nr_sects = part->nr_sects;
} while (read_seqcount_retry(&part->nr_sects_seq, seq));
return nr_sects;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
sector_t nr_sects;
preempt_disable();
@@ -766,7 +766,7 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
write_seqcount_begin(&part->nr_sects_seq);
part->nr_sects = size;
write_seqcount_end(&part->nr_sects_seq);
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
part->nr_sects = size;
preempt_enable();

View File

@@ -23,6 +23,7 @@ extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(const void *buf, int size);
#ifdef CONFIG_IMA_KEXEC
@@ -91,6 +92,11 @@ static inline void ima_post_path_mknod(struct dentry *dentry)
return;
}
static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
{
return -EOPNOTSUPP;
}
static inline void ima_kexec_cmdline(const void *buf, int size) {}
#endif /* CONFIG_IMA */
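A usage sketch for the new ima_file_hash() interface (hypothetical caller; treating any negative return as failure, which matches the -EOPNOTSUPP stub above):

/* hypothetical example */
static void mydrv_log_hash(struct file *file)
{
	char digest[64];	/* large enough for SHA-512 */

	if (ima_file_hash(file, digest, sizeof(digest)) < 0)
		return;
	/* digest now holds the cached IMA measurement */
}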
@@ -101,6 +107,20 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
{}
#endif
#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
extern void ima_post_key_create_or_update(struct key *keyring,
struct key *key,
const void *payload, size_t plen,
unsigned long flags, bool create);
#else
static inline void ima_post_key_create_or_update(struct key *keyring,
struct key *key,
const void *payload,
size_t plen,
unsigned long flags,
bool create) {}
#endif /* CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS */
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
extern void ima_inode_post_setattr(struct dentry *dentry);

View File

@@ -228,4 +228,10 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long free_region_end) {}
#endif
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
#endif /* LINUX_KASAN_H */

View File

@@ -23,6 +23,13 @@
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
/**
* INIT_LIST_HEAD - Initialize a list_head structure
* @list: list_head structure to be initialized.
*
* Initializes the list_head to point to itself. If it is a list header,
* the result is an empty list.
*/
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
@@ -120,12 +127,6 @@ static inline void __list_del_clearprev(struct list_head *entry)
entry->prev = NULL;
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
@@ -134,6 +135,12 @@ static inline void __list_del_entry(struct list_head *entry)
__list_del(entry->prev, entry->next);
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
{
__list_del_entry(entry);
@@ -157,8 +164,15 @@ static inline void list_replace(struct list_head *old,
new->prev->next = new;
}
/**
* list_replace_init - replace old entry by new one and initialize the old one
* @old : the element to be replaced
* @new : the new element to insert
*
* If @old was empty, it will be overwritten.
*/
static inline void list_replace_init(struct list_head *old,
struct list_head *new)
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
@@ -754,11 +768,36 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
h->pprev = NULL;
}
/**
* hlist_unhashed - Has node been removed from list and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed
* state. For example, hlist_nulls_del_init_rcu() does leave the
* node in unhashed state, but hlist_nulls_del() does not.
*/
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
/**
* hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
* @h: Node to be checked
*
* This variant of hlist_unhashed() must be used in lockless contexts
* to avoid potential load-tearing. The READ_ONCE() is paired with the
* various WRITE_ONCE() in hlist helpers that are defined below.
*/
static inline int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !READ_ONCE(h->pprev);
}
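The intended split, as a hedged sketch (hypothetical object type): lockers may keep using hlist_unhashed(), while lockless readers use the _lockless variant so the pprev load cannot be torn:

/* hypothetical example */
struct myobj {
	struct hlist_node node;
};

static bool myobj_is_queued_lockless(const struct myobj *obj)
{
	/* no bucket lock held; pairs with the WRITE_ONCE()s below */
	return !hlist_unhashed_lockless(&obj->node);
}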
/**
* hlist_empty - Is the specified hlist_head structure an empty hlist?
* @h: Structure to check.
*/
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
@@ -771,9 +810,16 @@ static inline void __hlist_del(struct hlist_node *n)
WRITE_ONCE(*pprev, next);
if (next)
next->pprev = pprev;
WRITE_ONCE(next->pprev, pprev);
}
/**
* hlist_del - Delete the specified hlist_node from its list
* @n: Node to delete.
*
* Note that this function leaves the node in hashed state. Use
* hlist_del_init() or similar instead to unhash @n.
*/
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
@@ -781,6 +827,12 @@ static inline void hlist_del(struct hlist_node *n)
n->pprev = LIST_POISON2;
}
/**
* hlist_del_init - Delete the specified hlist_node from its list and initialize
* @n: Node to delete.
*
* Note that this function leaves the node in unhashed state.
*/
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
@@ -789,51 +841,83 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}
/**
* hlist_add_head - add a new entry at the beginning of the hlist
* @n: new entry to be added
* @h: hlist head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
WRITE_ONCE(n->next, first);
if (first)
first->pprev = &n->next;
WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
n->pprev = &h->first;
WRITE_ONCE(n->pprev, &h->first);
}
/* next must be != NULL */
/**
* hlist_add_before - add a new entry before the one specified
* @n: new entry to be added
* @next: hlist node to add it before, which must be non-NULL
*/
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
WRITE_ONCE(n->pprev, next->pprev);
WRITE_ONCE(n->next, next);
WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
/**
* hlist_add_behind - add a new entry after the one specified
* @n: new entry to be added
* @prev: hlist node to add it after, which must be non-NULL
*/
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
prev->next = n;
n->pprev = &prev->next;
WRITE_ONCE(n->next, prev->next);
WRITE_ONCE(prev->next, n);
WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
n->next->pprev = &n->next;
WRITE_ONCE(n->next->pprev, &n->next);
}
/* after that we'll appear to be on some hlist and hlist_del will work */
/**
* hlist_add_fake - create a fake hlist consisting of a single headless node
* @n: Node to make a fake list out of
*
* This makes @n appear to be its own predecessor on a headless hlist.
* The point of this is to allow things like hlist_del() to work correctly
* in cases where there is no list.
*/
static inline void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
/**
* hlist_fake - Is this node a fake hlist?
* @h: Node to check for being a self-referential fake hlist.
*/
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
/*
/**
* hlist_is_singular_node - is node the only element of the specified hlist?
* @n: Node to check for singularity.
* @h: Header for potentially singular list.
*
* Check whether the node is the only node of the head without
* accessing head:
* accessing head, thus avoiding unnecessary cache misses.
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
@@ -841,7 +925,11 @@ hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
return !n->next && n->pprev == &h->first;
}
/*
/**
* hlist_move_list - Move an hlist
* @old: hlist_head for old list.
* @new: hlist_head for new list.
*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/

View File

@@ -56,11 +56,33 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
return ((unsigned long)ptr) >> 1;
}
/**
* hlist_nulls_unhashed - Has node been removed and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed state.
* For example, hlist_del_init_rcu() leaves the node in unhashed state,
* but hlist_nulls_del() does not.
*/
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
/**
* hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed state.
* For example, hlist_del_init_rcu() leaves the node in unhashed state,
* but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this
* function may be used locklessly.
*/
static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
{
return !READ_ONCE(h->pprev);
}
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(READ_ONCE(h->first));
@@ -72,10 +94,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
n->pprev = &h->first;
WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
first->pprev = &n->next;
WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -85,13 +107,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
next->pprev = pprev;
WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
n->pprev = LIST_POISON2;
WRITE_ONCE(n->pprev, LIST_POISON2);
}
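For context, a hedged sketch of the lockless lookup pattern these WRITE_ONCE() annotations support (hypothetical table; the RCU iterator lives in rculist_nulls.h, not this header). The reader restarts if the terminating nulls value shows it wandered onto another chain:

/* hypothetical example */
struct myent {
	struct hlist_nulls_node node;
	u32 key;
};

static struct myent *my_lookup(struct hlist_nulls_head *head, u32 key,
			       unsigned long slot)
{
	struct hlist_nulls_node *n;
	struct myent *e;

begin:
	hlist_nulls_for_each_entry_rcu(e, n, head, node)
		if (e->key == key)
			return e;
	if (get_nulls_value(n) != slot)	/* moved to another chain: retry */
		goto begin;
	return NULL;
}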
/**

View File

@@ -372,6 +372,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
return result | mii_adv_to_ethtool_adv_x(lpa);
}
/**
* mii_lpa_mod_linkmode_lpa_sgmii
* @lp_advertising: pointer to destination link mode.
* @lpa: value of the MII_LPA register
*
* A small helper function that translates MII_LPA bits to
* linkmode advertisement settings for SGMII.
* Leaves other bits unchanged.
*/
static inline void
mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
{
u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
speed_duplex == LPA_SGMII_1000HALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
speed_duplex == LPA_SGMII_1000FULL);
linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
speed_duplex == LPA_SGMII_100HALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
speed_duplex == LPA_SGMII_100FULL);
linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
speed_duplex == LPA_SGMII_10HALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
speed_duplex == LPA_SGMII_10FULL);
}
/**
* mii_lpa_to_linkmode_lpa_sgmii
* @lp_advertising: pointer to destination link mode.
* @lpa: value of the MII_LPA register
*
* A small helper function that translates MII_LPA bits
* to linkmode advertisement settings when in SGMII mode.
* Clears the old value of @lp_advertising.
*/
static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
u32 lpa)
{
linkmode_zero(lp_advertising);
mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
}
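A short usage sketch (hypothetical PHY driver fragment): after reading MII_LPA while in SGMII mode, one call fills the link-partner bitmap:

/* hypothetical example */
static int myphy_read_sgmii_status(struct phy_device *phydev)
{
	int lpa = phy_read(phydev, MII_LPA);

	if (lpa < 0)
		return lpa;
	mii_lpa_to_linkmode_lpa_sgmii(phydev->lp_advertising, lpa);
	return 0;
}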
/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.

View File

@@ -0,0 +1,121 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for generic time stamping devices on MII buses.
* Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
*/
#ifndef _LINUX_MII_TIMESTAMPER_H
#define _LINUX_MII_TIMESTAMPER_H
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
struct phy_device;
/**
* struct mii_timestamper - Callback interface to MII time stamping devices.
*
* @rxtstamp: Requests a Rx timestamp for 'skb'. If the skb is accepted,
* the MII time stamping device promises to deliver it using
* netif_rx() as soon as a timestamp becomes available. One of
* the PTP_CLASS_ values is passed in 'type'. The function
* must return true if the skb is accepted for delivery.
*
* @txtstamp: Requests a Tx timestamp for 'skb'. The MII time stamping
* device promises to deliver it using skb_complete_tx_timestamp()
* as soon as a timestamp becomes available. One of the PTP_CLASS_
* values is passed in 'type'.
*
* @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
*
* @link_state: Allows the device to respond to changes in the link
* state. The caller invokes this function while holding
* the phy_device mutex.
*
* @ts_info: Handles ethtool queries for hardware time stamping.
* @device: Remembers the device to which the instance belongs.
*
* Drivers for PHY time stamping devices should embed their
* mii_timestamper within a private structure, obtaining a reference
* to it using container_of().
*
* Drivers for non-PHY time stamping devices should return a pointer
* to a mii_timestamper from the probe_channel() callback of their
* mii_timestamping_ctrl interface.
*/
struct mii_timestamper {
bool (*rxtstamp)(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type);
void (*txtstamp)(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type);
int (*hwtstamp)(struct mii_timestamper *mii_ts,
struct ifreq *ifreq);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
int (*ts_info)(struct mii_timestamper *mii_ts,
struct ethtool_ts_info *ts_info);
struct device *device;
};
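The embedding scheme described above, as a minimal sketch (hypothetical driver types):

/* hypothetical example */
struct myts_priv {
	struct mii_timestamper mii_ts;	/* handed out to the core */
	spinlock_t lock;
};

static int myts_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifreq)
{
	struct myts_priv *priv = container_of(mii_ts, struct myts_priv, mii_ts);

	/* ... program priv's hardware from ifreq ... */
	return 0;
}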
/**
* struct mii_timestamping_ctrl - MII time stamping controller interface.
*
* @probe_channel: Callback into the controller driver announcing the
* presence of the 'port' channel. The 'device' field
* was passed to register_mii_tstamp_controller().
* The driver must return either a pointer to a valid
* MII timestamper instance or an ERR_PTR() value.
*
* @release_channel: Releases an instance obtained via .probe_channel.
*/
struct mii_timestamping_ctrl {
struct mii_timestamper *(*probe_channel)(struct device *device,
unsigned int port);
void (*release_channel)(struct device *device,
struct mii_timestamper *mii_ts);
};
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
int register_mii_tstamp_controller(struct device *device,
struct mii_timestamping_ctrl *ctrl);
void unregister_mii_tstamp_controller(struct device *device);
struct mii_timestamper *register_mii_timestamper(struct device_node *node,
unsigned int port);
void unregister_mii_timestamper(struct mii_timestamper *mii_ts);
#else
static inline
int register_mii_tstamp_controller(struct device *device,
struct mii_timestamping_ctrl *ctrl)
{
return -EOPNOTSUPP;
}
static inline void unregister_mii_tstamp_controller(struct device *device)
{
}
static inline
struct mii_timestamper *register_mii_timestamper(struct device_node *node,
unsigned int port)
{
return NULL;
}
static inline void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
{
}
#endif
#endif

View File

@@ -47,7 +47,7 @@
#define DEFAULT_UAR_PAGE_SHIFT 12
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
(dev_cap).num_ports * MIN_MSIX_P_PORT)

View File

@@ -1105,6 +1105,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_MEM,
MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
/* NUM OF CAP Types */
MLX5_CAP_NUM
@@ -1120,6 +1121,9 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
MLX5_MCAM_REGS_NUM = 0x3,
};
enum mlx5_mcam_feature_groups {
@@ -1268,7 +1272,16 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
#define MLX5_CAP_MCAM_REG(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
mng_access_reg_cap_mask.access_regs.reg)
#define MLX5_CAP_MCAM_REG1(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
mng_access_reg_cap_mask.access_regs1.reg)
#define MLX5_CAP_MCAM_REG2(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1297,6 +1310,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET(device_virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET64(device_virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,

View File

@@ -145,6 +145,8 @@ enum {
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
MLX5_REG_MIRC = 0x9162,
MLX5_REG_RESOURCE_DUMP = 0xC000,
};
enum mlx5_qpts_trust_state {
@@ -684,7 +686,7 @@ struct mlx5_core_dev {
u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
@@ -928,8 +930,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);

View File

@@ -48,6 +48,7 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
};
#define LEFTOVERS_RULE_NUM 2
@@ -145,25 +146,27 @@ mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type,
int vport);
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
int max_num_groups,
u32 level,
u32 flags);
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
struct mlx5_flow_table *next_ft;
struct {
int max_num_groups;
int num_reserved_entries;
} autogroup;
};
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -194,6 +197,7 @@ struct mlx5_fs_vlan {
enum {
FLOW_ACT_NO_APPEND = BIT(0),
FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
};
struct mlx5_flow_act {

View File

@@ -87,6 +87,7 @@ enum {
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
};
enum {
@@ -374,8 +375,17 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_esp_spi[0x1];
u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
u8 reserved_at_5b[0x5];
u8 reserved_at_5b[0x25];
u8 reserved_at_60[0x18];
u8 metadata_reg_c_7[0x1];
u8 metadata_reg_c_6[0x1];
u8 metadata_reg_c_5[0x1];
u8 metadata_reg_c_4[0x1];
u8 metadata_reg_c_3[0x1];
u8 metadata_reg_c_2[0x1];
u8 metadata_reg_c_1[0x1];
u8 metadata_reg_c_0[0x1];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -400,7 +410,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
u8 reserved_at_15[0x2];
u8 ignore_flow_level[0x1];
u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reserved_at_19[0x7];
@@ -721,7 +732,9 @@ enum {
struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
u8 reserved_at_8[0xf];
u8 reserved_at_8[0xd];
u8 fdb_modify_header_fwd_to_table[0x1];
u8 reserved_at_16[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
@@ -822,7 +835,9 @@ struct mlx5_ifc_qos_cap_bits {
struct mlx5_ifc_debug_cap_bits {
u8 core_dump_general[0x1];
u8 core_dump_qp[0x1];
u8 reserved_at_2[0x1e];
u8 reserved_at_2[0x7];
u8 resource_dump[0x1];
u8 reserved_at_a[0x16];
u8 reserved_at_20[0x2];
u8 stall_detect[0x1];
@@ -953,6 +968,19 @@ struct mlx5_ifc_device_event_cap_bits {
u8 user_unaffiliated_events[4][0x40];
};
struct mlx5_ifc_device_virtio_emulation_cap_bits {
u8 reserved_at_0[0x20];
u8 reserved_at_20[0x13];
u8 log_doorbell_stride[0x5];
u8 reserved_at_38[0x3];
u8 log_doorbell_bar_size[0x5];
u8 doorbell_bar_offset[0x40];
u8 reserved_at_80[0x780];
};
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -1753,6 +1781,132 @@ struct mlx5_ifc_resize_field_select_bits {
u8 resize_field_select[0x20];
};
struct mlx5_ifc_resource_dump_bits {
u8 more_dump[0x1];
u8 inline_dump[0x1];
u8 reserved_at_2[0xa];
u8 seq_num[0x4];
u8 segment_type[0x10];
u8 reserved_at_20[0x10];
u8 vhca_id[0x10];
u8 index1[0x20];
u8 index2[0x20];
u8 num_of_obj1[0x10];
u8 num_of_obj2[0x10];
u8 reserved_at_a0[0x20];
u8 device_opaque[0x40];
u8 mkey[0x20];
u8 size[0x20];
u8 address[0x40];
u8 inline_data[52][0x20];
};
struct mlx5_ifc_resource_dump_menu_record_bits {
u8 reserved_at_0[0x4];
u8 num_of_obj2_supports_active[0x1];
u8 num_of_obj2_supports_all[0x1];
u8 must_have_num_of_obj2[0x1];
u8 support_num_of_obj2[0x1];
u8 num_of_obj1_supports_active[0x1];
u8 num_of_obj1_supports_all[0x1];
u8 must_have_num_of_obj1[0x1];
u8 support_num_of_obj1[0x1];
u8 must_have_index2[0x1];
u8 support_index2[0x1];
u8 must_have_index1[0x1];
u8 support_index1[0x1];
u8 segment_type[0x10];
u8 segment_name[4][0x20];
u8 index1_name[4][0x20];
u8 index2_name[4][0x20];
};
struct mlx5_ifc_resource_dump_segment_header_bits {
u8 length_dw[0x10];
u8 segment_type[0x10];
};
struct mlx5_ifc_resource_dump_command_segment_bits {
struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
u8 segment_called[0x10];
u8 vhca_id[0x10];
u8 index1[0x20];
u8 index2[0x20];
u8 num_of_obj1[0x10];
u8 num_of_obj2[0x10];
};
struct mlx5_ifc_resource_dump_error_segment_bits {
struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
u8 reserved_at_20[0x10];
u8 syndrome_id[0x10];
u8 reserved_at_40[0x40];
u8 error[8][0x20];
};
struct mlx5_ifc_resource_dump_info_segment_bits {
struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
u8 reserved_at_20[0x18];
u8 dump_version[0x8];
u8 hw_version[0x20];
u8 fw_version[0x20];
};
struct mlx5_ifc_resource_dump_menu_segment_bits {
struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
u8 reserved_at_20[0x10];
u8 num_of_records[0x10];
struct mlx5_ifc_resource_dump_menu_record_bits record[0];
};
struct mlx5_ifc_resource_dump_resource_segment_bits {
struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
u8 reserved_at_20[0x20];
u8 index1[0x20];
u8 index2[0x20];
u8 payload[0][0x20];
};
struct mlx5_ifc_resource_dump_terminate_segment_bits {
struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
};
struct mlx5_ifc_menu_resource_dump_response_bits {
struct mlx5_ifc_resource_dump_info_segment_bits info;
struct mlx5_ifc_resource_dump_command_segment_bits cmd;
struct mlx5_ifc_resource_dump_menu_segment_bits menu;
struct mlx5_ifc_resource_dump_terminate_segment_bits terminate;
};
enum {
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
@@ -2026,7 +2180,9 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
u8 reserved_at_3c0[0x40];
u8 rx_discards_high[0x20];
u8 rx_discards_low[0x20];
u8 device_stall_minor_watermark_cnt_high[0x20];
@@ -2751,6 +2907,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
u8 reserved_at_0[0x8000];
};
@@ -3998,7 +4155,8 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x18];
u8 ignore_flow_level[0x1];
u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_at_e0[0x20];
@@ -5466,15 +5624,32 @@ struct mlx5_ifc_add_action_in_bits {
u8 data[0x20];
};
struct mlx5_ifc_copy_action_in_bits {
u8 action_type[0x4];
u8 src_field[0xc];
u8 reserved_at_10[0x3];
u8 src_offset[0x5];
u8 reserved_at_18[0x3];
u8 length[0x5];
u8 reserved_at_20[0x4];
u8 dst_field[0xc];
u8 reserved_at_30[0x3];
u8 dst_offset[0x5];
u8 reserved_at_38[0x8];
};
union mlx5_ifc_set_action_in_add_action_in_auto_bits {
struct mlx5_ifc_set_action_in_bits set_action_in;
struct mlx5_ifc_add_action_in_bits add_action_in;
struct mlx5_ifc_copy_action_in_bits copy_action_in;
u8 reserved_at_0[0x40];
};
enum {
MLX5_ACTION_TYPE_SET = 0x1,
MLX5_ACTION_TYPE_ADD = 0x2,
MLX5_ACTION_TYPE_COPY = 0x3,
};
enum {
@@ -5510,6 +5685,8 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
};
@@ -8406,6 +8583,18 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_50g[0x4];
u8 fec_override_admin_25g[0x4];
u8 fec_override_admin_10g_40g[0x4];
u8 fec_override_cap_400g_8x[0x10];
u8 fec_override_cap_200g_4x[0x10];
u8 fec_override_cap_100g_2x[0x10];
u8 fec_override_cap_50g_1x[0x10];
u8 fec_override_admin_400g_8x[0x10];
u8 fec_override_admin_200g_4x[0x10];
u8 fec_override_admin_100g_2x[0x10];
u8 fec_override_admin_50g_1x[0x10];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8732,7 +8921,9 @@ struct mlx5_ifc_mpegc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x6d];
u8 reserved_at_0[0x68];
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x4];
u8 ptys_extended_ethernet[0x1];
@@ -8817,6 +9008,28 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_31_to_0[0x20];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
u8 regs_127_to_96[0x20];
u8 regs_95_to_64[0x20];
u8 regs_63_to_32[0x20];
u8 regs_31_to_0[0x20];
};
struct mlx5_ifc_mcam_access_reg_bits2 {
u8 regs_127_to_99[0x1d];
u8 mirc[0x1];
u8 regs_97_to_96[0x2];
u8 regs_95_to_64[0x20];
u8 regs_63_to_32[0x20];
u8 regs_31_to_0[0x20];
};
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -8827,6 +9040,8 @@ struct mlx5_ifc_mcam_reg_bits {
union {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -9432,6 +9647,13 @@ struct mlx5_ifc_mcda_reg_bits {
u8 data[0][0x20];
};
struct mlx5_ifc_mirc_reg_bits {
u8 reserved_at_0[0x18];
u8 status_code[0x8];
u8 reserved_at_20[0x20];
};
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -9487,6 +9709,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
struct mlx5_ifc_mcc_reg_bits mcc_reg;
struct mlx5_ifc_mcda_reg_bits mcda_reg;
struct mlx5_ifc_mirc_reg_bits mirc_reg;
u8 reserved_at_0[0x60e0];
};

View File

@@ -625,24 +625,19 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
unsigned long addr = (unsigned long)x;
return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
return false;
#endif
}
#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
return 0;

View File

@@ -41,8 +41,10 @@
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_DEVICE_ID_BROADCOM_4359 0x4359
#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
#define SDIO_DEVICE_ID_CYPRESS_43012 43012
#define SDIO_DEVICE_ID_CYPRESS_89359 0x4355
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402

View File

@@ -849,13 +849,9 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
#ifdef CONFIG_STRICT_MODULE_RWX
extern void set_all_modules_text_rw(void);
extern void set_all_modules_text_ro(void);
extern void module_enable_ro(const struct module *mod, bool after_init);
extern void module_disable_ro(const struct module *mod);
#else
static inline void set_all_modules_text_rw(void) { }
static inline void set_all_modules_text_ro(void) { }
static inline void module_enable_ro(const struct module *mod, bool after_init) { }
static inline void module_disable_ro(const struct module *mod) { }
#endif

View File

@@ -171,6 +171,7 @@ struct proto_ops {
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
#endif
void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
/* Notes for implementing recvmsg:

View File

@@ -53,8 +53,9 @@ enum {
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
NETIF_F_GSO_UDP_L4_BIT,
NETIF_F_GSO_FRAGLIST_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -80,6 +81,7 @@ enum {
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
/*
* Add your fresh new feature above and remember to update
@@ -150,6 +152,8 @@ enum {
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
/* Finds the next feature with the highest number of the range of start till 0.
*/
@@ -226,6 +230,9 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
/* Changeable features with no special hardware requirements that defaults to off. */
#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \

View File

@@ -849,6 +849,8 @@ enum tc_setup_type {
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
TC_SETUP_FT,
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -875,6 +877,7 @@ enum bpf_netdev_command {
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -936,6 +939,11 @@ struct netdev_name_node {
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
};
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1014,7 +1022,7 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
* Called when a user wants to change the Maximum Transfer Unit
* of a device.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
* void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
@@ -1281,7 +1289,8 @@ struct net_device_ops {
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev);
void (*ndo_tx_timeout) (struct net_device *dev,
unsigned int txqueue);
void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
@@ -1707,6 +1716,7 @@ enum netdev_priv_flags {
* @miniq_ingress: ingress/clsact qdisc specific data for
* ingress processing
* @ingress_queue: XXX: need comments on this one
* @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
*
* @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
@@ -1788,6 +1798,10 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1983,12 +1997,10 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
int watchdog_timeo;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_cpus_map;
@@ -1998,11 +2010,15 @@ struct net_device {
struct mini_Qdisc __rcu *miniq_egress;
#endif
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
int watchdog_timeo;
int __percpu *pcpu_refcnt;
struct list_head todo_list;
int __percpu *pcpu_refcnt;
struct list_head link_watch_list;
@@ -2078,6 +2094,8 @@ struct net_device {
struct lock_class_key addr_list_lock_key;
bool proto_down;
unsigned wol_enabled:1;
struct list_head net_notifier_list;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2319,7 +2337,8 @@ struct napi_gro_cb {
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
/* 1 bit hole */
/* GRO is done by frag_list pointer chaining. */
u8 is_flist:1;
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2521,6 +2540,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb);
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
int unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb);
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn);
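A hedged usage sketch for the new per-device variant: the caller supplies the netdev_net_notifier bookkeeping so the registration can follow the device when it moves to another network namespace:

/* hypothetical example */
static int mydrv_event(struct notifier_block *nb, unsigned long event, void *ptr);

static struct notifier_block mydrv_nb = { .notifier_call = mydrv_event };
static struct netdev_net_notifier mydrv_nn;

static int mydrv_watch(struct net_device *dev)
{
	return register_netdevice_notifier_dev_net(dev, &mydrv_nb, &mydrv_nn);
}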
struct netdev_notifier_info {
struct net_device *dev;
@@ -2687,6 +2712,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -2823,16 +2849,16 @@ static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
}
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
__sum16 check, __wsum pseudo)
__wsum pseudo)
{
NAPI_GRO_CB(skb)->csum = ~pseudo;
NAPI_GRO_CB(skb)->csum_valid = 1;
}
#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
do { \
if (__skb_gro_checksum_convert_check(skb)) \
__skb_gro_checksum_convert(skb, check, \
__skb_gro_checksum_convert(skb, \
compute_pseudo(skb, proto)); \
} while (0)
@@ -3887,22 +3913,48 @@ void netif_device_attach(struct net_device *dev);
*/
enum {
NETIF_MSG_DRV = 0x0001,
NETIF_MSG_PROBE = 0x0002,
NETIF_MSG_LINK = 0x0004,
NETIF_MSG_TIMER = 0x0008,
NETIF_MSG_IFDOWN = 0x0010,
NETIF_MSG_IFUP = 0x0020,
NETIF_MSG_RX_ERR = 0x0040,
NETIF_MSG_TX_ERR = 0x0080,
NETIF_MSG_TX_QUEUED = 0x0100,
NETIF_MSG_INTR = 0x0200,
NETIF_MSG_TX_DONE = 0x0400,
NETIF_MSG_RX_STATUS = 0x0800,
NETIF_MSG_PKTDATA = 0x1000,
NETIF_MSG_HW = 0x2000,
NETIF_MSG_WOL = 0x4000,
NETIF_MSG_DRV_BIT,
NETIF_MSG_PROBE_BIT,
NETIF_MSG_LINK_BIT,
NETIF_MSG_TIMER_BIT,
NETIF_MSG_IFDOWN_BIT,
NETIF_MSG_IFUP_BIT,
NETIF_MSG_RX_ERR_BIT,
NETIF_MSG_TX_ERR_BIT,
NETIF_MSG_TX_QUEUED_BIT,
NETIF_MSG_INTR_BIT,
NETIF_MSG_TX_DONE_BIT,
NETIF_MSG_RX_STATUS_BIT,
NETIF_MSG_PKTDATA_BIT,
NETIF_MSG_HW_BIT,
NETIF_MSG_WOL_BIT,
/* When you add a new bit above, update netif_msg_class_names array
* in net/ethtool/common.c
*/
NETIF_MSG_CLASS_COUNT,
};
/* Both ethtool_ops interface and internal driver implementation use u32 */
static_assert(NETIF_MSG_CLASS_COUNT <= 32);
#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
#define NETIF_MSG_DRV __NETIF_MSG(DRV)
#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
#define NETIF_MSG_LINK __NETIF_MSG(LINK)
#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
#define NETIF_MSG_INTR __NETIF_MSG(INTR)
#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
#define NETIF_MSG_HW __NETIF_MSG(HW)
#define NETIF_MSG_WOL __NETIF_MSG(WOL)
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
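The conversion is value-preserving: __NETIF_MSG(LINK), for example, expands to ((u32)1 << NETIF_MSG_LINK_BIT), i.e. 1 << 2 = 0x0004, the same literal as before, so existing tests keep working:

/* hypothetical driver fragment */
struct mydrv_priv {
	struct net_device *netdev;
	u32 msg_enable;
};

static void mydrv_link_up(struct mydrv_priv *p)
{
	if (p->msg_enable & NETIF_MSG_LINK)	/* unchanged by the rework */
		netdev_info(p->netdev, "link is up\n");
}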
@@ -4393,6 +4445,15 @@ struct netdev_notifier_bonding_info {
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
#else
static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
const void *data)
{
}
#endif
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
@@ -4554,6 +4615,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}

View File

@@ -9,17 +9,17 @@
#ifndef PADATA_H
#define PADATA_H
#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#define PADATA_CPU_SERIAL 0x01
#define PADATA_CPU_PARALLEL 0x02
/**
* struct padata_priv - Embedded to the users data structure.
* struct padata_priv - Represents one job
*
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
@@ -42,7 +42,7 @@ struct padata_priv {
};
/**
* struct padata_list
* struct padata_list - one per work type per CPU
*
* @list: List head.
* @lock: List lock.
@@ -70,9 +70,6 @@ struct padata_serial_queue {
*
* @parallel: List to wait for parallelization.
* @reorder: List to wait for reordering after parallel processing.
* @serial: List to wait for serialization after reordering.
* @pwork: work struct for parallelization.
* @swork: work struct for serialization.
* @work: work struct for parallelization.
* @num_obj: Number of objects that are processed by this cpu.
*/
@@ -98,12 +95,11 @@ struct padata_cpumask {
* struct parallel_data - Internal control structure, covers everything
* that depends on the cpumask in use.
*
* @pinst: padata instance.
* @ps: padata_shell object.
* @pqueue: percpu padata queues used for parallelization.
* @squeue: percpu padata queues used for serialization.
* @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @max_seq_nr: Maximal used sequence number.
* @seq_nr: Sequence number of the parallelized data object.
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
@@ -111,30 +107,44 @@ struct padata_cpumask {
* @lock: Reorder lock.
*/
struct parallel_data {
struct padata_instance *pinst;
struct padata_shell *ps;
struct padata_parallel_queue __percpu *pqueue;
struct padata_serial_queue __percpu *squeue;
atomic_t reorder_objects;
atomic_t refcnt;
atomic_t seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
struct work_struct reorder_work;
spinlock_t lock ____cacheline_aligned;
spinlock_t ____cacheline_aligned lock;
};
/**
* struct padata_shell - Wrapper around struct parallel_data, its
* purpose is to allow the underlying control structure to be replaced
* on the fly using RCU.
*
* @pinst: padata instance.
* @pd: Actual parallel_data structure which may be substituted on the fly.
* @opd: Pointer to old pd to be freed by padata_replace.
* @list: List entry in padata_instance list.
*/
struct padata_shell {
struct padata_instance *pinst;
struct parallel_data __rcu *pd;
struct parallel_data *opd;
struct list_head list;
};
/**
* struct padata_instance - The overall control structure.
*
* @cpu_notifier: cpu hotplug notifier.
* @node: Used by CPU hotplug.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
* @pd: The internal control structure.
* @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
* @cpumask_change_notifier: Notifiers chain for user-defined notify
* callbacks that will be called when either @pcpu or @cbcpu
* or both cpumasks change.
* @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
@@ -143,9 +153,9 @@ struct padata_instance {
struct hlist_node node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
struct parallel_data *pd;
struct list_head pslist;
struct padata_cpumask cpumask;
struct blocking_notifier_head cpumask_change_notifier;
struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -156,15 +166,13 @@ struct padata_instance {
extern struct padata_instance *padata_alloc_possible(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern int padata_do_parallel(struct padata_instance *pinst,
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
extern void padata_free_shell(struct padata_shell *ps);
extern int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
struct notifier_block *nblock);
extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
struct notifier_block *nblock);
#endif
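A hedged sketch of the shell-based submission flow that replaces the old pinst-based one (hypothetical job type; callbacks invented for illustration):

/* hypothetical example */
struct myjob {
	struct padata_priv padata;	/* embedded, as documented above */
	void *payload;
};

static void my_parallel(struct padata_priv *padata)
{
	/* ... heavy work ... then hand the object to the serializer */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	/* runs in submission order */
}

static int my_submit(struct padata_shell *ps, struct myjob *job, int *cb_cpu)
{
	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;
	return padata_do_parallel(ps, &job->padata, cb_cpu);
}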

View File

@@ -17,10 +17,12 @@
#include <linux/linkmode.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/mii_timestamper.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
#include <linux/u64_stats_sync.h>
#include <linux/atomic.h>
@@ -99,9 +101,11 @@ typedef enum {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GKR,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GBASER,
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -175,10 +179,12 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
return "xaui";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
case PHY_INTERFACE_MODE_10GBASER:
return "10gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
default:
return "unknown";
}
@@ -207,6 +213,15 @@ struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
struct mdio_bus_stats {
u64_stats_t transfers;
u64_stats_t errors;
u64_stats_t writes;
u64_stats_t reads;
/* Must be last, add new statistics above */
struct u64_stats_sync syncp;
};
/*
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
@@ -219,6 +234,7 @@ struct mii_bus {
int (*read)(struct mii_bus *bus, int addr, int regnum);
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
int (*reset)(struct mii_bus *bus);
struct mdio_bus_stats stats[PHY_MAX_ADDR];
/*
* A lock to ensure that only one thing can read/write
@@ -327,6 +343,9 @@ struct phy_c45_device_ids {
u32 device_ids[8];
};
struct macsec_context;
struct macsec_ops;
/* phy_device: An instance of a PHY
*
* drv: Pointer to the driver for this PHY instance
@@ -349,6 +368,7 @@ struct phy_c45_device_ids {
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
* macsec_ops: MACsec offloading ops.
*
* speed, duplex, pause, supported, advertising, lp_advertising,
* and autoneg are used like in mii_if_info
@@ -441,12 +461,18 @@ struct phy_device {
struct sfp_bus *sfp_bus;
struct phylink *phylink;
struct net_device *attached_dev;
struct mii_timestamper *mii_ts;
u8 mdix;
u8 mdix_ctrl;
void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
void (*adjust_link)(struct net_device *dev);
#if IS_ENABLED(CONFIG_MACSEC)
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
};
#define to_phy_device(d) container_of(to_mdio_device(d), \
struct phy_device, mdio)
@@ -546,29 +572,6 @@ struct phy_driver {
*/
int (*match_phy_device)(struct phy_device *phydev);
/* Handles ethtool queries for hardware time stamping. */
int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
/* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
/*
* Requests a Rx timestamp for 'skb'. If the skb is accepted,
* the phy driver promises to deliver it using netif_rx() as
* soon as a timestamp becomes available. One of the
* PTP_CLASS_ values is passed in 'type'. The function must
* return true if the skb is accepted for delivery.
*/
bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
/*
* Requests a Tx timestamp for 'skb'. The phy driver promises
* to deliver it using skb_complete_tx_timestamp() as soon as a
* timestamp becomes available. One of the PTP_CLASS_ values
* is passed in 'type'.
*/
void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
/* Some devices (e.g. qnap TS-119P II) require PHY register changes to
* enable Wake on LAN, so set_wol is provided to be called in the
* ethernet driver's set_wol function. */
@@ -936,6 +939,66 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
return phydev->irq == PHY_POLL;
}
/**
* phy_has_hwtstamp - Tests whether a PHY supports hardware time stamp configuration.
* @phydev: the phy_device struct
*/
static inline bool phy_has_hwtstamp(struct phy_device *phydev)
{
return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
}
/**
* phy_has_rxtstamp - Tests whether a PHY supports receive time stamping.
* @phydev: the phy_device struct
*/
static inline bool phy_has_rxtstamp(struct phy_device *phydev)
{
return phydev && phydev->mii_ts && phydev->mii_ts->rxtstamp;
}
/**
* phy_has_tsinfo - Tests whether a PHY reports time stamping and/or
* PTP hardware clock capabilities.
* @phydev: the phy_device struct
*/
static inline bool phy_has_tsinfo(struct phy_device *phydev)
{
return phydev && phydev->mii_ts && phydev->mii_ts->ts_info;
}
/**
* phy_has_txtstamp - Tests whether a PHY supports transmit time stamping.
* @phydev: the phy_device struct
*/
static inline bool phy_has_txtstamp(struct phy_device *phydev)
{
return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
}
static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
int type)
{
return phydev->mii_ts->rxtstamp(phydev->mii_ts, skb, type);
}
static inline int phy_ts_info(struct phy_device *phydev,
struct ethtool_ts_info *tsinfo)
{
return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
}
static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
int type)
{
phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type);
}
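Together, the phy_has_*() tests and the wrappers above let a MAC driver defer time stamping to an attached PHY when one provides a timestamper. A sketch of the ioctl side; my_mac_hwtstamp_ioctl is illustrative, not an existing helper:

static int my_mac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct phy_device *phydev = dev->phydev;

	if (phy_has_hwtstamp(phydev))
		return phy_hwtstamp(phydev, ifr);	/* PHY does the stamping */

	return -EOPNOTSUPP;	/* or fall back to MAC-level time stamping */
}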
/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
@@ -1088,17 +1151,21 @@ static inline void phy_unlock_mdio_bus(struct phy_device *phydev)
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
__printf(2, 3);
char *phy_attached_info_irq(struct phy_device *phydev)
__malloc;
void phy_attached_info(struct phy_device *phydev);
/* Clause 22 PHY */
int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
@@ -1175,6 +1242,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);


@@ -63,10 +63,12 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
* @pcs_poll: set when the MAC PCS cannot provide a link change interrupt
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
bool pcs_poll;
};
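A MAC whose PCS lacks a link-change interrupt opts into polling when creating its phylink instance. A sketch, assuming a hypothetical struct my_mac_priv and my_phylink_ops:

static int my_mac_create_phylink(struct my_mac_priv *priv,
				 struct fwnode_handle *fwnode,
				 phy_interface_t mode)
{
	struct phylink *pl;

	priv->phylink_config.dev = priv->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;	/* no link-change interrupt */

	pl = phylink_create(&priv->phylink_config, fwnode, mode,
			    &my_phylink_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	priv->phylink = pl;
	return 0;
}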
/**


@@ -1,23 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CRYPTO_ATMEL_H
#define __LINUX_CRYPTO_ATMEL_H
#include <linux/platform_data/dma-atmel.h>
/**
* struct crypto_dma_data - DMA data for AES/TDES/SHA
*/
struct crypto_dma_data {
struct at_dma_slave txdata;
struct at_dma_slave rxdata;
};
/**
* struct crypto_platform_data - board-specific AES/TDES/SHA configuration
* @dma_slave: DMA slave interface to use in data transfers.
*/
struct crypto_platform_data {
struct crypto_dma_data *dma_slave;
};
#endif /* __LINUX_CRYPTO_ATMEL_H */


@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PLATFORM_DATA_ETH_IXP4XX
#define __PLATFORM_DATA_ETH_IXP4XX
#include <linux/types.h>
#define IXP4XX_ETH_NPEA 0x00
#define IXP4XX_ETH_NPEB 0x10
#define IXP4XX_ETH_NPEC 0x20
/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
u8 phy; /* MII PHY ID, 0 - 31 */
u8 rxq; /* configurable, currently 0 - 31 only */
u8 txreadyq;
u8 hwaddr[6];
};
#endif
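Board code hands this structure to the ixp4xx_eth driver as platform data; a sketch with illustrative values:

static struct eth_plat_info my_npeb_eth_data = {
	.phy	   = 0,			/* MII PHY at address 0 */
	.rxq	   = 3,
	.txreadyq  = 20,
	.hwaddr	   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};

static struct platform_device my_npeb_eth_device = {
	.name		   = "ixp4xx_eth",
	.id		   = IXP4XX_ETH_NPEB,
	.dev.platform_data = &my_npeb_eth_data,
};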


@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H
#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H
#include <linux/types.h>
/* Information about built-in HSS (synchronous serial) interfaces */
struct hss_plat_info {
int (*set_clock)(int port, unsigned int clock_type);
int (*open)(int port, void *pdev,
void (*set_carrier_cb)(void *pdev, int carrier));
void (*close)(int port, void *pdev);
u8 txreadyq;
u32 timer_freq;
};
#endif

include/linux/psp-tee.h

@@ -0,0 +1,91 @@
/* SPDX-License-Identifier: MIT */
/*
* AMD Trusted Execution Environment (TEE) interface
*
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
*
* Copyright 2019 Advanced Micro Devices, Inc.
*
*/
#ifndef __PSP_TEE_H_
#define __PSP_TEE_H_
#include <linux/types.h>
#include <linux/errno.h>
/* This file defines the Trusted Execution Environment (TEE) interface commands
* and the API exported by AMD Secure Processor driver to communicate with
* AMD-TEE Trusted OS.
*/
/**
* enum tee_cmd_id - TEE Interface Command IDs
* @TEE_CMD_ID_LOAD_TA: Load Trusted Application (TA) binary into
* TEE environment
* @TEE_CMD_ID_UNLOAD_TA: Unload TA binary from TEE environment
* @TEE_CMD_ID_OPEN_SESSION: Open session with loaded TA
* @TEE_CMD_ID_CLOSE_SESSION: Close session with loaded TA
* @TEE_CMD_ID_INVOKE_CMD: Invoke a command with loaded TA
* @TEE_CMD_ID_MAP_SHARED_MEM: Map shared memory
* @TEE_CMD_ID_UNMAP_SHARED_MEM: Unmap shared memory
*/
enum tee_cmd_id {
TEE_CMD_ID_LOAD_TA = 1,
TEE_CMD_ID_UNLOAD_TA,
TEE_CMD_ID_OPEN_SESSION,
TEE_CMD_ID_CLOSE_SESSION,
TEE_CMD_ID_INVOKE_CMD,
TEE_CMD_ID_MAP_SHARED_MEM,
TEE_CMD_ID_UNMAP_SHARED_MEM,
};
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
/**
* psp_tee_process_cmd() - Process command in Trusted Execution Environment
* @cmd_id: TEE command ID (&enum tee_cmd_id)
* @buf: Command buffer for TEE processing. On success, it is updated
* with the response
* @len: Length of command buffer in bytes
* @status: On success, holds the TEE command execution status
*
* This function submits a command to the Trusted OS for processing in the
* TEE environment and waits for a response or until the command times out.
*
* Returns:
* 0 if TEE successfully processed the command
* -%ENODEV if PSP device not available
* -%EINVAL if invalid input
* -%ETIMEDOUT if TEE command timed out
* -%EBUSY if PSP device is not responsive
*/
int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
u32 *status);
/**
* psp_check_tee_status() - Checks whether there is a TEE which a driver can
* talk to.
*
* This function can be used by the AMD-TEE driver to query whether there is
* a TEE with which it can communicate.
*
* Returns:
* 0 if the device has TEE
* -%ENODEV if there is no TEE available
*/
int psp_check_tee_status(void);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
static inline int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf,
size_t len, u32 *status)
{
return -ENODEV;
}
static inline int psp_check_tee_status(void)
{
return -ENODEV;
}
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_TEE_H_ */
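A caller is expected to probe for the TEE before submitting work. A sketch of loading a TA; the command buffer is assumed to already be laid out as TEE_CMD_ID_LOAD_TA expects:

static int my_load_ta(void *cmd_buf, size_t len)
{
	u32 status = 0;
	int ret;

	ret = psp_check_tee_status();
	if (ret)
		return ret;		/* -ENODEV: no TEE present */

	ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, cmd_buf, len, &status);
	if (ret)
		return ret;		/* transport-level failure */

	return status ? -EIO : 0;	/* non-zero status: TEE rejected it */
}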


@@ -243,6 +243,13 @@ int ptp_find_pin(struct ptp_clock *ptp,
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
/**
* ptp_cancel_worker_sync() - cancel the PTP auxiliary clock worker
*
* @ptp: The clock obtained from ptp_clock_register().
*/
void ptp_cancel_worker_sync(struct ptp_clock *ptp);
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -260,6 +267,8 @@ static inline int ptp_find_pin(struct ptp_clock *ptp,
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
#endif
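The new helper pairs with ptp_schedule_worker(): a driver that polls from the auxiliary worker must cancel it before unregistering the clock. A sketch with a hypothetical struct my_dev:

/* .do_aux_work callback in ptp_clock_info */
static long my_do_aux_work(struct ptp_clock_info *info)
{
	/* ... poll timestamp hardware ... */
	return HZ;	/* re-run the worker in one second */
}

static void my_remove(struct my_dev *dev)
{
	ptp_cancel_worker_sync(dev->ptp);	/* no further reschedules */
	ptp_clock_unregister(dev->ptp);
}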


@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/errno.h>
#endif


@@ -76,7 +76,6 @@
#define FW_ASSERT_GENERAL_ATTN_IDX 32
#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
@@ -105,12 +104,19 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define MAX_NUM_LL2_RX_QUEUES 48
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
/* Number of LL2 RAM based queues */
#define MAX_NUM_LL2_RX_RAM_QUEUES 32
/* Number of LL2 context based queues */
#define MAX_NUM_LL2_RX_CTX_QUEUES 208
#define MAX_NUM_LL2_RX_QUEUES \
(MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 37
#define FW_REVISION_VERSION 7
#define FW_MINOR_VERSION 42
#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -132,10 +138,10 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -222,6 +228,7 @@
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -340,6 +347,10 @@
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
/* DQ_DEMS_AGG_VAL_BASE */
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -395,6 +406,7 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB_E4 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
@@ -425,8 +437,6 @@
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -439,8 +449,6 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
@@ -652,8 +660,8 @@
#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
#define BTB_MAX_BLOCKS_BB 1440
#define BTB_MAX_BLOCKS_K2 1840
/*****************/
/* PRS CONSTANTS */
/*****************/
@@ -730,6 +738,8 @@ enum protocol_type {
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
PROTOCOLID_RESERVED1,
PROTOCOLID_RDMA,
PROTOCOLID_SCSI,
MAX_PROTOCOL_TYPE
};
@@ -750,6 +760,10 @@ union rdma_eqe_data {
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
struct tstorm_queue_zone {
__le32 reserved[2];
};
/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
@@ -872,8 +886,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */


@@ -38,9 +38,11 @@
/********************/
#define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 10
#define ETH_HSI_VER_MINOR 11
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
/* Maximum number of pinned L2 connections (CIDs) */
#define ETH_PINNED_CONN_MAX_NUM 32
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
@@ -61,6 +63,7 @@
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
@@ -75,9 +78,8 @@
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
#define ETH_RX_BD_THRESHOLD 12
#define ETH_RX_BD_THRESHOLD 16
/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
@@ -96,24 +98,24 @@
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
enum dest_port_mode {
DEST_PORT_PHY,
DEST_PORT_LOOPBACK,
DEST_PORT_PHY_LOOPBACK,
DEST_PORT_DROP,
MAX_DEST_PORT_MODE
enum dst_port_mode {
DST_PORT_PHY,
DST_PORT_LOOPBACK,
DST_PORT_PHY_LOOPBACK,
DST_PORT_DROP,
MAX_DST_PORT_MODE
};
/* Ethernet address type */
@@ -167,8 +169,8 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
@@ -244,8 +246,9 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
u8 reserved;
__le16 flow_id;
u8 reserved1[11];
__le16 reserved2;
__le32 flow_id_or_resource_id;
u8 reserved1[7];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -296,9 +299,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
__le16 flow_id;
u8 reserved;
__le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
__le16 reserved2;
__le32 flow_id_or_resource_id;
u8 reserved[3];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -407,6 +411,29 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
/* The parsing information data for the fourth tx bd of a given packet. */
struct eth_tx_data_4th_bd {
u8 dst_vport_id;
u8 reserved4;
__le16 bitfields;
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/* The fourth tx bd of a given packet */
struct eth_tx_4th_bd {
struct regpair addr; /* Single continuous buffer */
__le16 nbytes; /* Number of bytes in this BD */
struct eth_tx_data_4th_bd data; /* Parsing information data */
};
/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
__le16 reserved0;
@@ -431,6 +458,7 @@ union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd;
struct eth_tx_4th_bd fourth_bd;
struct eth_tx_bd reg_bd;
};
@@ -443,6 +471,12 @@ enum eth_tx_tunn_type {
MAX_ETH_TX_TUNN_TYPE
};
/* Mstorm Queue Zone */
struct mstorm_eth_queue_zone {
struct eth_rx_prod_data rx_producers;
__le32 reserved[3];
};
/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;


@@ -999,7 +999,6 @@ struct iscsi_conn_offload_params {
struct regpair r2tq_pbl_addr;
struct regpair xhq_pbl_addr;
struct regpair uhq_pbl_addr;
__le32 initial_ack;
__le16 physical_q0;
__le16 physical_q1;
u8 flags;
@@ -1011,10 +1010,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
__le16 reserved0;
__le32 stat_sn;
__le32 initial_ack;
};
/* iSCSI connection statistics */
@@ -1029,25 +1028,14 @@ struct iscsi_conn_stats_params {
__le32 reserved;
};
/* spe message header */
struct iscsi_slow_path_hdr {
u8 op_code;
u8 flags;
#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
};
/* iSCSI connection update params passed by driver to FW in ISCSI update
* ramrod.
*/
struct iscsi_conn_update_ramrod_params {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
@@ -1065,7 +1053,7 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
u8 reserved0[3];
u8 reserved3[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
@@ -1251,22 +1239,22 @@ enum iscsi_ramrod_cmd_id {
/* iSCSI connection MAC update request */
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
u8 reserved0[2];
u8 reserved2[2];
};
/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
@@ -1275,44 +1263,36 @@ struct iscsi_spe_conn_offload {
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload_option2 {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
/* iSCSI collect connection statistics request */
struct iscsi_spe_conn_statistics {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
u8 reset_stats;
u8 reserved0[7];
u8 reserved2[7];
struct regpair stats_cnts_addr;
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
u8 abortive;
u8 reserved0[7];
u8 reserved2[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
/* iSCSI firmware function destroy parameters */
struct iscsi_spe_func_dstry {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le32 reserved1;
};
/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
@@ -1324,8 +1304,12 @@ struct iscsi_spe_func_init {
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
__le16 reserved1;
__le32 reserved2;
u8 params;
#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
u8 reserved2[7];
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};


@@ -159,6 +159,7 @@ struct qed_dcbx_get {
enum qed_nvm_images {
QED_NVM_IMAGE_ISCSI_CFG,
QED_NVM_IMAGE_FCOE_CFG,
QED_NVM_IMAGE_MDUMP,
QED_NVM_IMAGE_NVM_CFG1,
QED_NVM_IMAGE_DEFAULT_CFG,
QED_NVM_IMAGE_NVM_META,
@@ -463,7 +464,7 @@ enum qed_db_rec_space {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
(void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0x1FF
@@ -1177,6 +1178,17 @@ struct qed_common_ops {
#define GET_FIELD(value, name) \
(((value) >> (name ## _SHIFT)) & name ## _MASK)
#define GET_MFW_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define SET_MFW_FIELD(name, field, value) \
do { \
(name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
} while (0)
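Unlike GET_FIELD(), whose masks sit at bit 0, the MFW variants take masks already positioned at the field's offset. A short self-check with a hypothetical field definition:

#define DRV_MB_PARAM_EXAMPLE_MASK	0x0000ff00
#define DRV_MB_PARAM_EXAMPLE_OFFSET	8

static void example_mfw_field(void)
{
	u32 mb_param = 0;

	SET_MFW_FIELD(mb_param, DRV_MB_PARAM_EXAMPLE, 0x5a);
	/* mb_param is now 0x5a00 */
	WARN_ON(GET_MFW_FIELD(mb_param, DRV_MB_PARAM_EXAMPLE) != 0x5a);
}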
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
do { \


@@ -52,6 +52,12 @@ enum qed_ll2_conn_type {
QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_IWARP,
QED_LL2_TYPE_RESERVED3,
MAX_QED_LL2_CONN_TYPE
};
enum qed_ll2_rx_conn_type {
QED_LL2_RX_TYPE_LEGACY,
QED_LL2_RX_TYPE_CTX,
MAX_QED_LL2_RX_CONN_TYPE
};
@@ -165,6 +171,7 @@ struct qed_ll2_cbs {
};
struct qed_ll2_acquire_data_inputs {
enum qed_ll2_rx_conn_type rx_conn_type;
enum qed_ll2_conn_type conn_type;
u16 mtu;
u16 rx_num_desc;


@@ -107,8 +107,9 @@ struct scsi_drv_cmdq {
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
u8 log_page_size_conn;
u8 debug_mode;
u8 reserved2[12];
u8 reserved2[11];
};
/* SCSI RQ/CQ/CMDQ firmware function init parameters */


@@ -22,7 +22,6 @@ struct rcu_cblist {
struct rcu_head *head;
struct rcu_head **tail;
long len;
long len_lazy;
};
#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
@@ -73,7 +72,6 @@ struct rcu_segcblist {
#else
long len;
#endif
long len_lazy;
u8 enabled;
u8 offloaded;
};


@@ -40,6 +40,16 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
/**
* list_tail_rcu - returns the prev pointer of the head of the list
* @head: the head of the list
*
* Note: This should only be used with the list header, and even then
* only if list_del() and similar primitives are not also used on the
* list header.
*/
#define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev)))
/*
* Check during list traversal that we are within an RCU reader
*/
@@ -173,7 +183,7 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
n->pprev = NULL;
WRITE_ONCE(n->pprev, NULL);
}
}
@@ -361,7 +371,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
* @cond: optional lockdep expression if called from non-RCU protection.
* @cond...: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
@@ -473,7 +483,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
n->pprev = LIST_POISON2;
WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -489,11 +499,11 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *next = old->next;
new->next = next;
new->pprev = old->pprev;
WRITE_ONCE(new->pprev, old->pprev);
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
new->next->pprev = &new->next;
old->pprev = LIST_POISON2;
WRITE_ONCE(new->next->pprev, &new->next);
WRITE_ONCE(old->pprev, LIST_POISON2);
}
/*
@@ -528,10 +538,10 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
first->pprev = &n->next;
WRITE_ONCE(first->pprev, &n->next);
}
/**
@@ -564,7 +574,7 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
if (last) {
n->next = last->next;
n->pprev = &last->next;
WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_next_rcu(last), n);
} else {
hlist_add_head_rcu(n, h);
@@ -592,10 +602,10 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
WRITE_ONCE(n->pprev, next->pprev);
n->next = next;
rcu_assign_pointer(hlist_pprev_rcu(n), n);
next->pprev = &n->next;
WRITE_ONCE(next->pprev, &n->next);
}
/**
@@ -620,10 +630,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
n->pprev = &prev->next;
WRITE_ONCE(n->pprev, &prev->next);
rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
n->next->pprev = &n->next;
WRITE_ONCE(n->next->pprev, &n->next);
}
#define __hlist_for_each_rcu(pos, head) \
@@ -636,7 +646,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
* @cond: optional lockdep expression if called from non-RCU protection.
* @cond...: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()


@@ -34,13 +34,21 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
n->pprev = NULL;
WRITE_ONCE(n->pprev, NULL);
}
}
/**
* hlist_nulls_first_rcu - returns the first element of the hash list.
* @head: the head of the list.
*/
#define hlist_nulls_first_rcu(head) \
(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
/**
* hlist_nulls_next_rcu - returns the element of the list after @node.
* @node: element of the list.
*/
#define hlist_nulls_next_rcu(node) \
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
@@ -66,7 +74,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
n->pprev = LIST_POISON2;
WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -94,10 +102,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
n->pprev = &h->first;
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
first->pprev = &n->next;
WRITE_ONCE(first->pprev, &n->next);
}
/**
@@ -141,7 +149,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head for your list.
* @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*
* The barrier() is needed to make sure the compiler doesn't cache the first element [1],
@@ -161,7 +169,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
* iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head for your list.
* @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*/
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \


@@ -154,7 +154,7 @@ static inline void exit_tasks_rcu_finish(void) { }
*
* This macro resembles cond_resched(), except that it is defined to
* report potential quiescent states to RCU-tasks even if the cond_resched()
* machinery were to be shut off, as some advocate for PREEMPT kernels.
* machinery were to be shut off, as some advocate for PREEMPTION kernels.
*/
#define cond_resched_tasks_rcu_qs() \
do { \
@@ -167,7 +167,7 @@ do { \
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
@@ -400,22 +400,6 @@ do { \
__tmp; \
})
/**
* rcu_swap_protected() - swap an RCU and a regular pointer
* @rcu_ptr: RCU pointer
* @ptr: regular pointer
* @c: the conditions under which the dereference will take place
*
* Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
* @c is the argument that is passed to the rcu_dereference_protected() call
* used to read that pointer.
*/
#define rcu_swap_protected(rcu_ptr, ptr, c) do { \
typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
rcu_assign_pointer((rcu_ptr), (ptr)); \
(ptr) = __tmp; \
} while (0)
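Users of the removed macro were converted to its designated replacement, rcu_replace_pointer(), which returns the old value instead of swapping in place. A sketch of the replacement pattern with an illustrative struct my_cfg:

static struct my_cfg *my_cfg_replace(struct my_cfg __rcu **slot,
				     struct my_cfg *new,
				     struct mutex *lock)
{
	struct my_cfg *old;

	old = rcu_replace_pointer(*slot, new, lockdep_is_held(lock));

	/* caller frees old after a grace period, e.g. via kfree_rcu() */
	return old;
}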
/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
* @p: The pointer to read
@@ -598,10 +582,10 @@ do { \
*
* You can avoid reading and understanding the next paragraph by
* following this rule: don't put anything in an rcu_read_lock() RCU
* read-side critical section that would block in a !PREEMPT kernel.
* read-side critical section that would block in a !PREEMPTION kernel.
* But if you want the full story, read on!
*
* In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
* In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
* it is illegal to block while in an RCU read-side critical section.
* In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
* kernel builds, RCU read-side critical sections may be preempted,
@@ -912,4 +896,8 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
return false;
}
/* kernel/ksysfs.c definitions */
extern int rcu_expedited;
extern int rcu_normal;
#endif /* __LINUX_RCUPDATE_H */


@@ -85,6 +85,7 @@ static inline void rcu_scheduler_starting(void) { }
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }


@@ -38,6 +38,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);

include/linux/resctrl.h

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RESCTRL_H
#define _RESCTRL_H
#ifdef CONFIG_PROC_CPU_RESCTRL
int proc_resctrl_show(struct seq_file *m,
struct pid_namespace *ns,
struct pid *pid,
struct task_struct *tsk);
#endif
#endif /* _RESCTRL_H */


@@ -9,7 +9,6 @@
*/
#define SCHED_CPUFREQ_IOWAIT (1U << 0)
#define SCHED_CPUFREQ_MIGRATION (1U << 1)
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy;


@@ -275,6 +275,61 @@ struct sfp_diag {
__be16 cal_v_offset;
} __packed;
/* SFF8024 defined constants */
enum {
SFF8024_ID_UNK = 0x00,
SFF8024_ID_SFF_8472 = 0x02,
SFF8024_ID_SFP = 0x03,
SFF8024_ID_DWDM_SFP = 0x0b,
SFF8024_ID_QSFP_8438 = 0x0c,
SFF8024_ID_QSFP_8436_8636 = 0x0d,
SFF8024_ID_QSFP28_8636 = 0x11,
SFF8024_ENCODING_UNSPEC = 0x00,
SFF8024_ENCODING_8B10B = 0x01,
SFF8024_ENCODING_4B5B = 0x02,
SFF8024_ENCODING_NRZ = 0x03,
SFF8024_ENCODING_8472_MANCHESTER= 0x04,
SFF8024_ENCODING_8472_SONET = 0x05,
SFF8024_ENCODING_8472_64B66B = 0x06,
SFF8024_ENCODING_8436_MANCHESTER= 0x06,
SFF8024_ENCODING_8436_SONET = 0x04,
SFF8024_ENCODING_8436_64B66B = 0x05,
SFF8024_ENCODING_256B257B = 0x07,
SFF8024_ENCODING_PAM4 = 0x08,
SFF8024_CONNECTOR_UNSPEC = 0x00,
/* codes 01-05 not supportable on SFP, but some modules have single SC */
SFF8024_CONNECTOR_SC = 0x01,
SFF8024_CONNECTOR_FIBERJACK = 0x06,
SFF8024_CONNECTOR_LC = 0x07,
SFF8024_CONNECTOR_MT_RJ = 0x08,
SFF8024_CONNECTOR_MU = 0x09,
SFF8024_CONNECTOR_SG = 0x0a,
SFF8024_CONNECTOR_OPTICAL_PIGTAIL= 0x0b,
SFF8024_CONNECTOR_MPO_1X12 = 0x0c,
SFF8024_CONNECTOR_MPO_2X16 = 0x0d,
SFF8024_CONNECTOR_HSSDC_II = 0x20,
SFF8024_CONNECTOR_COPPER_PIGTAIL= 0x21,
SFF8024_CONNECTOR_RJ45 = 0x22,
SFF8024_CONNECTOR_NOSEPARATE = 0x23,
SFF8024_CONNECTOR_MXC_2X16 = 0x24,
SFF8024_ECC_UNSPEC = 0x00,
SFF8024_ECC_100G_25GAUI_C2M_AOC = 0x01,
SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 0x02,
SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 0x03,
SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 0x04,
SFF8024_ECC_100GBASE_SR10 = 0x05,
SFF8024_ECC_100GBASE_CR4 = 0x0b,
SFF8024_ECC_25GBASE_CR_S = 0x0c,
SFF8024_ECC_25GBASE_CR_N = 0x0d,
SFF8024_ECC_10GBASE_T_SFI = 0x16,
SFF8024_ECC_10GBASE_T_SR = 0x1c,
SFF8024_ECC_5GBASE_T = 0x1d,
SFF8024_ECC_2_5GBASE_T = 0x1e,
};
/* SFP EEPROM registers */
enum {
SFP_PHYS_ID = 0x00,
@@ -309,34 +364,7 @@ enum {
SFP_SFF8472_COMPLIANCE = 0x5e,
SFP_CC_EXT = 0x5f,
SFP_PHYS_ID_SFF = 0x02,
SFP_PHYS_ID_SFP = 0x03,
SFP_PHYS_EXT_ID_SFP = 0x04,
SFP_CONNECTOR_UNSPEC = 0x00,
/* codes 01-05 not supportable on SFP, but some modules have single SC */
SFP_CONNECTOR_SC = 0x01,
SFP_CONNECTOR_FIBERJACK = 0x06,
SFP_CONNECTOR_LC = 0x07,
SFP_CONNECTOR_MT_RJ = 0x08,
SFP_CONNECTOR_MU = 0x09,
SFP_CONNECTOR_SG = 0x0a,
SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b,
SFP_CONNECTOR_MPO_1X12 = 0x0c,
SFP_CONNECTOR_MPO_2X16 = 0x0d,
SFP_CONNECTOR_HSSDC_II = 0x20,
SFP_CONNECTOR_COPPER_PIGTAIL = 0x21,
SFP_CONNECTOR_RJ45 = 0x22,
SFP_CONNECTOR_NOSEPARATE = 0x23,
SFP_CONNECTOR_MXC_2X16 = 0x24,
SFP_ENCODING_UNSPEC = 0x00,
SFP_ENCODING_8B10B = 0x01,
SFP_ENCODING_4B5B = 0x02,
SFP_ENCODING_NRZ = 0x03,
SFP_ENCODING_8472_MANCHESTER = 0x04,
SFP_ENCODING_8472_SONET = 0x05,
SFP_ENCODING_8472_64B66B = 0x06,
SFP_ENCODING_256B257B = 0x07,
SFP_ENCODING_PAM4 = 0x08,
SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
SFP_OPTIONS_PAGING_A2 = BIT(12),
SFP_OPTIONS_RETIMER = BIT(11),
@@ -479,6 +507,8 @@ struct sfp_bus;
* @module_insert: called after a module has been detected to determine
* whether the module is supported for the upstream device.
* @module_remove: called after the module has been removed.
* @module_start: called after the PHY probe step
* @module_stop: called before the PHY is removed
* @link_down: called when the link is non-operational for whatever
* reason.
* @link_up: called when the link is operational.
@@ -492,6 +522,8 @@ struct sfp_upstream_ops {
void (*detach)(void *priv, struct sfp_bus *bus);
int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
void (*module_remove)(void *priv);
int (*module_start)(void *priv);
void (*module_stop)(void *priv);
void (*link_down)(void *priv);
void (*link_up)(void *priv);
int (*connect_phy)(void *priv, struct phy_device *);
@@ -501,10 +533,10 @@ struct sfp_upstream_ops {
#if IS_ENABLED(CONFIG_SFP)
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
@@ -525,6 +557,12 @@ static inline int sfp_parse_port(struct sfp_bus *bus,
return PORT_OTHER;
}
static inline bool sfp_may_have_phy(struct sfp_bus *bus,
const struct sfp_eeprom_id *id)
{
return false;
}
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *support)
@@ -532,7 +570,6 @@ static inline void sfp_parse_support(struct sfp_bus *bus,
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *link_modes)
{
return PHY_INTERFACE_MODE_NA;


@@ -592,6 +592,8 @@ enum {
SKB_GSO_UDP = 1 << 16,
SKB_GSO_UDP_L4 = 1 << 17,
SKB_GSO_FRAGLIST = 1 << 18,
};
#if BITS_PER_LONG > 32
@@ -1478,6 +1480,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb->next = NULL;
}
/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb) \
for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
(skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
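The iterator captures the next pointer before the body runs, so the current skb may be unlinked or freed inside the loop. A sketch handing each segment from skb_segment() to a hypothetical per-skb transmit routine:

static void my_xmit_segments(struct sk_buff *segs)
{
	struct sk_buff *skb, *next;

	skb_list_walk_safe(segs, skb, next) {
		skb_mark_not_on_list(skb);	/* detach before handing off */
		my_xmit_one(skb);		/* hypothetical transmit */
	}
}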
static inline void skb_list_del_init(struct sk_buff *skb)
{
__list_del_entry(&skb->list);
@@ -3459,7 +3466,8 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
@@ -3468,12 +3476,16 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
int *off, int *err);
@@ -3523,6 +3535,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
@@ -4091,6 +4105,9 @@ enum skb_ext_id {
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
TC_SKB_EXT,
#endif
#if IS_ENABLED(CONFIG_MPTCP)
SKB_EXT_MPTCP,
#endif
SKB_EXT_NUM, /* must be last */
};
@@ -4112,6 +4129,9 @@ struct skb_ext {
char data[0] __aligned(8);
};
struct skb_ext *__skb_ext_alloc(void);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);


@@ -547,6 +547,20 @@ struct sdw_slave_ops {
* @node: node for bus list
* @port_ready: Port ready completion flag for each Slave port
* @dev_num: Device Number assigned by Bus
* @probed: boolean tracking driver state
* @probe_complete: completion utility to control potential races
* on startup between driver probe/initialization and SoundWire
* Slave state changes/implementation-defined interrupts
* @enumeration_complete: completion utility to control potential races
* on startup between device enumeration and read/write access to the
* Slave device
* @initialization_complete: completion utility to control potential races
* on startup between device enumeration and settings being restored
* @unattach_request: mask field to keep track of why the Slave re-attached and
* was re-initialized. This is useful to deal with potential race conditions
* between the Master suspending and the codec resuming, and make sure that
* when the Master triggered a reset the Slave is properly enumerated and
* initialized
*/
struct sdw_slave {
struct sdw_slave_id id;
@@ -561,6 +575,11 @@ struct sdw_slave {
struct list_head node;
struct completion *port_ready;
u16 dev_num;
bool probed;
struct completion probe_complete;
struct completion enumeration_complete;
struct completion initialization_complete;
u32 unattach_request;
};
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)


@@ -4,36 +4,174 @@
#ifndef __SDW_INTEL_H
#define __SDW_INTEL_H
#include <linux/irqreturn.h>
/**
* struct sdw_intel_ops: Intel audio driver callback ops
*
* @config_stream: configure the stream with the hw_params
* the first argument containing the context is mandatory
* struct sdw_intel_stream_params_data: configuration passed during
* the @params_stream callback, e.g. for interaction with DSP
* firmware.
*/
struct sdw_intel_ops {
int (*config_stream)(void *arg, void *substream,
void *dai, void *hw_params, int stream_num);
struct sdw_intel_stream_params_data {
struct snd_pcm_substream *substream;
struct snd_soc_dai *dai;
struct snd_pcm_hw_params *hw_params;
int link_id;
int alh_stream_id;
};
/**
* struct sdw_intel_res - Soundwire Intel resource structure
* struct sdw_intel_stream_free_data: configuration passed during
* the @free_stream callback, e.g. for interaction with DSP
* firmware.
*/
struct sdw_intel_stream_free_data {
struct snd_pcm_substream *substream;
struct snd_soc_dai *dai;
int link_id;
};
/**
* struct sdw_intel_ops: Intel audio driver callback ops
*
*/
struct sdw_intel_ops {
int (*params_stream)(struct device *dev,
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
};
/**
* struct sdw_intel_acpi_info - Soundwire Intel information found in ACPI tables
* @handle: ACPI controller handle
* @count: link count found with "sdw-master-count" property
* @link_mask: bit-wise mask listing links enabled by BIOS menu
*
* This structure could be expanded to e.g. provide all the _ADR
* information in case the link_mask is not sufficient to identify
* platform capabilities.
*/
struct sdw_intel_acpi_info {
acpi_handle handle;
int count;
u32 link_mask;
};
struct sdw_intel_link_res;
/* Intel clock-stop/pm_runtime quirk definitions */
/*
* Force the clock to remain on during pm_runtime suspend. This might
* be needed if Slave devices do not have an alternate clock source or
* if the latency requirements are very strict.
*/
#define SDW_INTEL_CLK_STOP_NOT_ALLOWED BIT(0)
/*
* Stop the bus during pm_runtime suspend. If set, a complete bus
* reset and re-enumeration will be performed when the bus
* restarts. This mode shall not be used if Slave devices can generate
* in-band wakes.
*/
#define SDW_INTEL_CLK_STOP_TEARDOWN BIT(1)
/*
* Stop the bus during pm_suspend if Slaves are not wake capable
* (e.g. speaker amplifiers). The clock-stop mode is typically
* slightly higher power than when the IP is completely powered-off.
*/
#define SDW_INTEL_CLK_STOP_WAKE_CAPABLE_ONLY BIT(2)
/*
* Require a bus reset (and complete re-enumeration) when exiting
* clock stop modes. This may be needed if the controller power was
* turned off and all context lost. This quirk shall not be used if a
* Slave device needs to remain enumerated and keep its context,
* e.g. to provide the reasons for the wake, report acoustic events or
* pass a history buffer.
*/
#define SDW_INTEL_CLK_STOP_BUS_RESET BIT(3)
/**
* struct sdw_intel_ctx - context allocated by the controller
* driver probe
* @count: link count
* @mmio_base: mmio base of SoundWire registers, only used to check
* hardware capabilities after all power dependencies are settled.
* @link_mask: bit-wise mask listing SoundWire links reported by the
* Controller
* @handle: ACPI parent handle
* @links: information for each link (controller-specific and kept
* opaque here)
* @link_list: list to handle interrupts across all links
* @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
*/
struct sdw_intel_ctx {
int count;
void __iomem *mmio_base;
u32 link_mask;
acpi_handle handle;
struct sdw_intel_link_res *links;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
};
/**
* struct sdw_intel_res - Soundwire Intel global resource structure,
* typically populated by the DSP driver
*
* @count: link count
* @mmio_base: mmio base of SoundWire registers
* @irq: interrupt number
* @handle: ACPI parent handle
* @parent: parent device
* @ops: callback ops
* @arg: callback arg
* @dev: device implementing hwparams and free callbacks
* @link_mask: bit-wise mask listing links selected by the DSP driver
* This mask may be a subset of the one reported by the controller since
* machine-specific quirks are handled in the DSP driver.
* @clock_stop_quirks: mask array of possible behaviors requested by the
* DSP driver. The quirks are common for all links for now.
*/
struct sdw_intel_res {
int count;
void __iomem *mmio_base;
int irq;
acpi_handle handle;
struct device *parent;
const struct sdw_intel_ops *ops;
void *arg;
struct device *dev;
u32 link_mask;
u32 clock_stop_quirks;
};
void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res);
void sdw_intel_exit(void *arg);
/*
* On Intel platforms, the SoundWire IP has dependencies on power
* rails shared with the DSP, and the initialization steps are split
* in three. First an ACPI scan to check what the firmware describes
* in DSDT tables, then an allocation step (with no hardware
* configuration but with all the relevant devices created) and last
* the actual hardware configuration. The final stage is a global
* interrupt enable which is controlled by the DSP driver. Splitting
* these phases helps simplify the boot flow and make early decisions
* on e.g. which machine driver to select (I2S mode, HDaudio or
* SoundWire).
*/
int sdw_intel_acpi_scan(acpi_handle *parent_handle,
struct sdw_intel_acpi_info *info);
void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx);
struct sdw_intel_ctx *
sdw_intel_probe(struct sdw_intel_res *res);
int sdw_intel_startup(struct sdw_intel_ctx *ctx);
void sdw_intel_exit(struct sdw_intel_ctx *ctx);
void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable);
irqreturn_t sdw_intel_thread(int irq, void *dev_id);
#endif
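Putting the three phases together, a DSP driver's bring-up would look roughly like this sketch; error handling is abbreviated and res is assumed to be mostly filled in by the caller:

static int my_dsp_init_soundwire(acpi_handle handle, struct sdw_intel_res *res)
{
	struct sdw_intel_acpi_info info = {};
	struct sdw_intel_ctx *ctx;
	int ret;

	ret = sdw_intel_acpi_scan(&handle, &info);	/* 1: ACPI scan */
	if (ret || !info.link_mask)
		return ret ? ret : -ENODEV;

	res->link_mask = info.link_mask;	/* may be reduced by quirks */
	ctx = sdw_intel_probe(res);		/* 2: allocate, no hw access */
	if (!ctx)
		return -ENODEV;

	return sdw_intel_startup(ctx);		/* 3: configure the hardware */
}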


@@ -109,6 +109,18 @@ struct stmmac_axi {
bool axi_rb;
};
#define EST_GCL 1024
struct stmmac_est {
int enable;
u32 btr_offset[2];
u32 btr[2];
u32 ctr[2];
u32 ter;
u32 gcl_unaligned[EST_GCL];
u32 gcl[EST_GCL];
u32 gcl_size;
};
struct stmmac_rxq_cfg {
u8 mode_to_use;
u32 chan;
@@ -127,6 +139,7 @@ struct stmmac_txq_cfg {
u32 low_credit;
bool use_prio;
u32 prio;
int tbs_en;
};
struct plat_stmmacenet_data {
@@ -139,6 +152,7 @@ struct plat_stmmacenet_data {
struct device_node *phylink_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_est *est;
int clk_csr;
int has_gmac;
int enh_desc;


@@ -32,8 +32,6 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
@@ -82,20 +80,6 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
return false;
}
static inline int stop_cpus(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg)
{
if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
return stop_one_cpu(raw_smp_processor_id(), fn, arg);
return -ENOENT;
}
static inline int try_stop_cpus(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg)
{
return stop_cpus(cpumask, fn, arg);
}
#endif /* CONFIG_SMP */
/*


@@ -78,6 +78,27 @@ struct tcp_sack_block {
#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */
#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/
#if IS_ENABLED(CONFIG_MPTCP)
struct mptcp_options_received {
u64 sndr_key;
u64 rcvr_key;
u64 data_ack;
u64 data_seq;
u32 subflow_seq;
u16 data_len;
u8 mp_capable : 1,
mp_join : 1,
dss : 1;
u8 use_map:1,
dsn64:1,
data_fin:1,
use_ack:1,
ack64:1,
mpc_map:1,
__unused:2;
};
#endif
struct tcp_options_received {
/* PAWS/RTTM data */
int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -95,6 +116,9 @@ struct tcp_options_received {
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
#if IS_ENABLED(CONFIG_MPTCP)
struct mptcp_options_received mptcp;
#endif
};
static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -104,6 +128,11 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
#if IS_ENABLED(CONFIG_SMC)
rx_opt->smc_ok = 0;
#endif
#if IS_ENABLED(CONFIG_MPTCP)
rx_opt->mptcp.mp_capable = 0;
rx_opt->mptcp.mp_join = 0;
rx_opt->mptcp.dss = 0;
#endif
}
/* This is the max number of SACKS that we'll generate and process. It's safe
@@ -119,6 +148,9 @@ struct tcp_request_sock {
const struct tcp_request_sock_ops *af_specific;
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
@@ -354,6 +386,8 @@ struct tcp_sock {
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif
u16 timeout_rehash; /* Timeout-triggered rehash attempts */
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
/* Receiver side RTT estimation */
@@ -379,6 +413,9 @@ struct tcp_sock {
u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
* while socket was owned by user.
*/
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */


@@ -109,8 +109,10 @@ enum tick_dep_bits {
TICK_DEP_BIT_PERF_EVENTS = 1,
TICK_DEP_BIT_SCHED = 2,
TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
TICK_DEP_BIT_RCU = 4
TICK_DEP_BIT_RCU = 4,
TICK_DEP_BIT_RCU_EXP = 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP
#define TICK_DEP_MASK_NONE 0
#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER)
@@ -118,6 +120,7 @@ enum tick_dep_bits {
#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP (1 << TICK_DEP_BIT_RCU_EXP)
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;


@@ -192,6 +192,22 @@ enum trace_reg {
struct trace_event_call;
#define TRACE_FUNCTION_TYPE ((const char *)~0UL)
struct trace_event_fields {
const char *type;
union {
struct {
const char *name;
const int size;
const int align;
const int is_signed;
const int filter_type;
};
int (*define_fields)(struct trace_event_call *);
};
};
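An event class now points at a static, sentinel-terminated array rather than supplying a define_fields() callback (the union still allows the callback form). A sketch for a hypothetical one-field event:

static struct trace_event_fields my_event_fields[] = {
	{
		.type		= "unsigned int",
		.name		= "pid",
		.size		= sizeof(unsigned int),
		.align		= __alignof__(unsigned int),
		.is_signed	= 0,
		.filter_type	= FILTER_OTHER,
	},
	{}	/* sentinel */
};

/* wired up in the class as: .fields_array = my_event_fields */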
struct trace_event_class {
const char *system;
void *probe;
@@ -200,7 +216,7 @@ struct trace_event_class {
#endif
int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
int (*define_fields)(struct trace_event_call *);
struct trace_event_fields *fields_array;
struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
int (*raw_init)(struct trace_event_call *);


@@ -253,7 +253,7 @@ extern int usbnet_open(struct net_device *net);
extern int usbnet_stop(struct net_device *net);
extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
struct net_device *net);
extern void usbnet_tx_timeout(struct net_device *net);
extern void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue);
extern int usbnet_change_mtu(struct net_device *net, int new_mtu);
extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *);
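The timeout hook now receives the index of the stalled queue; drivers layered on usbnet keep plumbing the helper straight into their netdev ops. Sketch:

static const struct net_device_ops my_usbnet_netdev_ops = {
	.ndo_open	= usbnet_open,
	.ndo_stop	= usbnet_stop,
	.ndo_start_xmit	= usbnet_start_xmit,
	.ndo_tx_timeout	= usbnet_tx_timeout,	/* now (net, txqueue) */
	.ndo_change_mtu	= usbnet_change_mtu,
};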


@@ -10,6 +10,8 @@
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <asm/vmalloc.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
struct notifier_block; /* in notifier.h */