Merge branch 'linus/master' into rdma.git for-next
rdma.git merge resolution for the 4.19 merge window

Conflicts:
 drivers/infiniband/core/rdma_core.c
  - Use the rdma code and revise with the new spelling for
    atomic_fetch_add_unless
 drivers/nvme/host/rdma.c
  - Replace max_sge with max_send_sge in new blk code
 drivers/nvme/target/rdma.c
  - Use the blk code and revise to use NULL for ib_post_recv when
    appropriate
  - Replace max_sge with max_recv_sge in new blk code
 net/rds/ib_send.c
  - Use the net code and revise to use NULL for ib_post_recv when
    appropriate

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
include/linux/acpi.h
@@ -1058,27 +1058,20 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
 
 /* Device properties */
 
-#define MAX_ACPI_REFERENCE_ARGS	8
-struct acpi_reference_args {
-	struct acpi_device *adev;
-	size_t nargs;
-	u64 args[MAX_ACPI_REFERENCE_ARGS];
-};
-
 #ifdef CONFIG_ACPI
 int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
 			  acpi_object_type type, const union acpi_object **obj);
 int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 		const char *name, size_t index, size_t num_args,
-		struct acpi_reference_args *args);
+		struct fwnode_reference_args *args);
 
 static inline int acpi_node_get_property_reference(
 				const struct fwnode_handle *fwnode,
 				const char *name, size_t index,
-				struct acpi_reference_args *args)
+				struct fwnode_reference_args *args)
 {
 	return __acpi_node_get_property_reference(fwnode, name, index,
-						  MAX_ACPI_REFERENCE_ARGS, args);
+						  NR_FWNODE_REFERENCE_ARGS, args);
 }
 
 int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
@@ -1096,14 +1089,6 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
 					    struct fwnode_handle *child);
 struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
 
-struct fwnode_handle *
-acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
-			     struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
-				   struct fwnode_handle **remote,
-				   struct fwnode_handle **port,
-				   struct fwnode_handle **endpoint);
-
 struct acpi_probe_entry;
 typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
 						 struct acpi_probe_entry *);
@@ -1169,7 +1154,7 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
 static inline int
 __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 				   const char *name, size_t index, size_t num_args,
-				   struct acpi_reference_args *args)
+				   struct fwnode_reference_args *args)
 {
 	return -ENXIO;
 }
@@ -1177,7 +1162,7 @@ __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 static inline int
 acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 				 const char *name, size_t index,
-				 struct acpi_reference_args *args)
+				 struct fwnode_reference_args *args)
 {
 	return -ENXIO;
 }
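For context on the acpi_reference_args -> fwnode_reference_args conversion above, a sketch of what an updated caller might look like (the property name and helper are illustrative, not from this diff):

	static int resolve_panel_ref(struct fwnode_handle *fwnode)
	{
		struct fwnode_reference_args args;
		int ret;

		ret = acpi_node_get_property_reference(fwnode, "panel", 0, &args);
		if (ret)
			return ret;
		/* args.fwnode is the referenced node; args.args[] holds its cells */
		fwnode_handle_put(args.fwnode);
		return 0;
	}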
include/linux/ascii85.h (new file)
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/kernel.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+	return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+	int i;
+
+	if (in == 0)
+		return "z";
+
+	out[5] = '\0';
+	for (i = 5; i--; ) {
+		out[i] = '!' + in % 85;
+		in /= 85;
+	}
+
+	return out;
+}
+
+#endif
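A minimal usage sketch for the new helper (the caller below is hypothetical; the header itself only provides the two inlines). Note that ascii85_encode() returns the literal "z" for a zero word without touching the buffer, which printing the returned pointer handles naturally:

	static void dump_words_ascii85(const u32 *words, long nwords)
	{
		char out[ASCII85_BUFSZ];
		long i;

		for (i = 0; i < nwords; i++)
			pr_cont("%s", ascii85_encode(words[i], out));
	}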
include/linux/atomic.h
@@ -2,6 +2,8 @@
 /* Atomic operations usable in machine independent code */
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
 #include <asm/atomic.h>
 #include <asm/barrier.h>
 
@@ -36,40 +38,46 @@
  * barriers on top of the relaxed variant. In the case where the relaxed
  * variant is already fully ordered, no additional barriers are needed.
  *
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
  */
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence		smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence		smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence	smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence	smp_mb__after_atomic
+#endif
+
 #define __atomic_op_acquire(op, args...)				\
 ({									\
 	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
-	smp_mb__after_atomic();						\
+	__atomic_acquire_fence();					\
 	__ret;								\
 })
-#endif
 
-#ifndef __atomic_op_release
 #define __atomic_op_release(op, args...)				\
 ({									\
-	smp_mb__before_atomic();					\
+	__atomic_release_fence();					\
 	op##_relaxed(args);						\
 })
-#endif
 
-#ifndef __atomic_op_fence
 #define __atomic_op_fence(op, args...)					\
 ({									\
 	typeof(op##_relaxed(args)) __ret;				\
-	smp_mb__before_atomic();					\
+	__atomic_pre_full_fence();					\
 	__ret = op##_relaxed(args);					\
-	smp_mb__after_atomic();						\
+	__atomic_post_full_fence();					\
 	__ret;								\
 })
-#endif
 
 /* atomic_add_return_relaxed */
 #ifndef atomic_add_return_relaxed
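With the reworked barriers above, an architecture now supplies only the fences and inherits every _acquire/_release variant. A sketch modeled loosely on RISC-V; the exact fence instructions are an assumption for illustration, not part of this diff:

	/* In an arch's asm/atomic.h (illustrative): */
	#define __atomic_acquire_fence()				\
		__asm__ __volatile__("fence r, rw" : : : "memory")
	#define __atomic_release_fence()				\
		__asm__ __volatile__("fence rw, w" : : : "memory")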
@@ -95,11 +103,23 @@
 #endif
 #endif /* atomic_add_return_relaxed */
 
+#ifndef atomic_inc
+#define atomic_inc(v)			atomic_add(1, (v))
+#endif
+
 /* atomic_inc_return_relaxed */
 #ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v)	atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v)	atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v)	atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
 #define atomic_inc_return_relaxed	atomic_inc_return
 #define atomic_inc_return_acquire	atomic_inc_return
 #define atomic_inc_return_release	atomic_inc_return
+#endif /* atomic_inc_return */
 
 #else /* atomic_inc_return_relaxed */
 
@@ -143,11 +163,23 @@
 #endif
 #endif /* atomic_sub_return_relaxed */
 
+#ifndef atomic_dec
+#define atomic_dec(v)			atomic_sub(1, (v))
+#endif
+
 /* atomic_dec_return_relaxed */
 #ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v)	atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v)	atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v)	atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
 #define atomic_dec_return_relaxed	atomic_dec_return
 #define atomic_dec_return_acquire	atomic_dec_return
 #define atomic_dec_return_release	atomic_dec_return
+#endif /* atomic_dec_return */
 
 #else /* atomic_dec_return_relaxed */
 
@@ -328,12 +360,22 @@
 #endif
 #endif /* atomic_fetch_and_relaxed */
 
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v)		atomic_and(~(int)(i), (v))
+#endif
+
 #ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire	atomic_fetch_andnot
-#define atomic_fetch_andnot_release	atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v)	atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v)	atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v)	atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v)	atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire	atomic_fetch_andnot
+#define atomic_fetch_andnot_release	atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
 
 #else /* atomic_fetch_andnot_relaxed */
 
@@ -352,7 +394,6 @@
 	__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
 #endif
 #endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
 
 /* atomic_fetch_xor_relaxed */
 #ifndef atomic_fetch_xor_relaxed
@@ -519,113 +560,141 @@
 #endif
 #endif /* xchg_relaxed */
 
+/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+	int c = atomic_read(v);
+
+	do {
+		if (unlikely(c == u))
+			break;
+	} while (!atomic_try_cmpxchg(v, &c, c + a));
+
+	return c;
+}
+#endif
+
 /**
  * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 {
-	return __atomic_add_unless(v, a, u) != u;
+	return atomic_fetch_add_unless(v, a, u) != u;
 }
 
 /**
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
  */
 #ifndef atomic_inc_not_zero
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 #endif
 
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
-	atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
-	return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
-	return atomic_fetch_and_relaxed(~i, v);
-}
-
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-	return atomic_fetch_and_acquire(~i, v);
-}
-
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-	return atomic_fetch_and_release(~i, v);
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
+{
+	return atomic_inc_return(v) == 0;
 }
 #endif
 
 /**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_dec_and_test - decrement and test
  * @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
  *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
- *
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
  */
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
 {
-	int val, c = hint;
+	return atomic_dec_return(v) == 0;
+}
+#endif
 
-	/* sanity test, should be removed by compiler if hint is a constant */
-	if (!hint)
-		return atomic_inc_not_zero(v);
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
+{
+	return atomic_sub_return(i, v) == 0;
+}
+#endif
 
-	do {
-		val = atomic_cmpxchg(v, c, c + 1);
-		if (val == c)
-			return 1;
-		c = val;
-	} while (c);
-
-	return 0;
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
+{
+	return atomic_add_return(i, v) < 0;
 }
 #endif
 
 #ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
 {
-	int v, v1;
-	for (v = 0; v >= 0; v = v1) {
-		v1 = atomic_cmpxchg(p, v, v + 1);
-		if (likely(v1 == v))
-			return 1;
-	}
-	return 0;
+	int c = atomic_read(v);
+
+	do {
+		if (unlikely(c < 0))
+			return false;
+	} while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+	return true;
 }
 #endif
 
 #ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
 {
-	int v, v1;
-	for (v = 0; v <= 0; v = v1) {
-		v1 = atomic_cmpxchg(p, v, v - 1);
-		if (likely(v1 == v))
-			return 1;
-	}
-	return 0;
+	int c = atomic_read(v);
+
+	do {
+		if (unlikely(c > 0))
+			return false;
+	} while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+	return true;
}
 #endif
 
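A usage sketch for the new fetch variant (hypothetical caller; this is effectively what atomic_inc_not_zero() does, but with the old value available to the caller):

	static bool try_get_ref(atomic_t *refs)
	{
		/* an old value of 0 means the object is already dead */
		return atomic_fetch_add_unless(refs, 1, 0) != 0;
	}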
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
 #ifndef atomic_dec_if_positive
 static inline int atomic_dec_if_positive(atomic_t *v)
 {
-	int c, old, dec;
-	c = atomic_read(v);
-	for (;;) {
+	int dec, c = atomic_read(v);
+
+	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-		old = atomic_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	} while (!atomic_try_cmpxchg(v, &c, dec));
+
 	return dec;
 }
 #endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_add_return_relaxed */
 
+#ifndef atomic64_inc
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#endif
+
 /* atomic64_inc_return_relaxed */
 #ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v)	atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v)	atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
 #define atomic64_inc_return_relaxed	atomic64_inc_return
 #define atomic64_inc_return_acquire	atomic64_inc_return
 #define atomic64_inc_return_release	atomic64_inc_return
+#endif /* atomic64_inc_return */
 
 #else /* atomic64_inc_return_relaxed */
 
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_sub_return_relaxed */
 
+#ifndef atomic64_dec
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+#endif
+
 /* atomic64_dec_return_relaxed */
 #ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v)	atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v)	atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
 #define atomic64_dec_return_relaxed	atomic64_dec_return
 #define atomic64_dec_return_acquire	atomic64_dec_return
 #define atomic64_dec_return_release	atomic64_dec_return
+#endif /* atomic64_dec_return */
 
 #else /* atomic64_dec_return_relaxed */
 
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_fetch_and_relaxed */
 
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v)		atomic64_and(~(long long)(i), (v))
+#endif
+
 #ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire	atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release	atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v)	atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v)	atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v)	atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v)	atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire	atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release	atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
 
 #else /* atomic64_fetch_andnot_relaxed */
 
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 	__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
 #endif
 #endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
 
 /* atomic64_fetch_xor_relaxed */
 #ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define atomic64_try_cmpxchg_release	atomic64_try_cmpxchg
 #endif /* atomic64_try_cmpxchg */
 
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
-{
-	atomic64_and(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
-{
-	return atomic64_fetch_and(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
-{
-	return atomic64_fetch_and_relaxed(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
-{
-	return atomic64_fetch_and_acquire(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
-{
-	return atomic64_fetch_and_release(~i, v);
-}
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+						  long long u)
+{
+	long long c = atomic64_read(v);
+
+	do {
+		if (unlikely(c == u))
+			break;
+	} while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+	return c;
+}
+#endif
+
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	return atomic64_fetch_add_unless(v, a, u) != u;
+}
+
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
+{
+	return atomic64_inc_return(v) == 0;
+}
+#endif
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
+{
+	return atomic64_dec_return(v) == 0;
+}
+#endif
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+	return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+	long long c = atomic64_read(v);
+
+	do {
+		if (unlikely(c < 0))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+	return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+	long long c = atomic64_read(v);
+
+	do {
+		if (unlikely(c > 0))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+	return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic64 variable, v, was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long dec, c = atomic64_read(v);
+
+	do {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+	} while (!atomic64_try_cmpxchg(v, &c, dec));
+
+	return dec;
+}
 #endif
 
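A sketch of the dec-if-positive idiom (hypothetical caller): the helper returns the old value minus one even when it does not decrement, so a negative result means nothing was consumed:

	static bool take_credit(atomic64_t *credits)
	{
		return atomic64_dec_if_positive(credits) >= 0;
	}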
include/linux/audit.h
@@ -117,6 +117,9 @@ struct filename;
 
 extern void audit_log_session_info(struct audit_buffer *ab);
 
+#define AUDIT_OFF	0
+#define AUDIT_ON	1
+#define AUDIT_LOCKED	2
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
 			/* Public API */
@@ -202,7 +205,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
 static inline void audit_log_task_info(struct audit_buffer *ab,
 				       struct task_struct *tsk)
 { }
-#define audit_enabled 0
+#define audit_enabled AUDIT_OFF
 #endif /* CONFIG_AUDIT */
 
 #ifdef CONFIG_AUDIT_COMPAT_GENERIC
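The named tri-state reads more clearly at the check sites; a simplified sketch of the kind of test audit.c performs (the helper name here is hypothetical):

	static int audit_state_change_allowed(void)
	{
		/* AUDIT_LOCKED: the state can no longer be changed at runtime */
		return audit_enabled == AUDIT_LOCKED ? -EPERM : 0;
	}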
include/linux/bio.h
@@ -429,7 +429,6 @@ extern void bio_put(struct bio *);
 
 extern void __bio_clone_fast(struct bio *, struct bio *);
 extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set fs_bio_set;
 
@@ -443,12 +442,6 @@ static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
 }
 
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
-	return bio_clone_bioset(bio, gfp_mask, NULL);
-
-}
-
 extern blk_qc_t submit_bio(struct bio *);
 
 extern void bio_endio(struct bio *);
@@ -496,9 +489,9 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
 			   unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int op,
 			 struct hd_struct *part,
 			 unsigned long start_time);
 
@@ -553,8 +546,16 @@ do {						\
 #define bio_dev(bio) \
 	disk_devt((bio)->bi_disk)
 
+#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+#else
+static inline int bio_associate_blkcg_from_page(struct bio *bio,
+						struct page *page) { return 0; }
+#endif
+
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
 void bio_disassociate_task(struct bio *bio);
 void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
 #else	/* CONFIG_BLK_CGROUP */
include/linux/bitfield.h
@@ -104,7 +104,7 @@
 		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
 	})
 
-extern void __compiletime_warning("value doesn't fit into mask")
+extern void __compiletime_error("value doesn't fit into mask")
 __field_overflow(void);
 extern void __compiletime_error("bad bitfield mask")
 __bad_mask(void);
@@ -121,8 +121,8 @@ static __always_inline u64 field_mask(u64 field)
 #define ____MAKE_OP(type,base,to,from)					\
 static __always_inline __##type type##_encode_bits(base v, base field)	\
 {									\
-	if (__builtin_constant_p(v) && (v & ~field_multiplier(field)))	\
-		__field_overflow();					\
+	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
+		__field_overflow();					\
 	return to((v & field_mask(field)) * field_multiplier(field));	\
 }									\
 static __always_inline __##type type##_replace_bits(__##type old,	\
@@ -143,6 +143,7 @@ static __always_inline base type##_get_bits(__##type v, base field)	\
 	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu)	\
 	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu)	\
 	____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
 __MAKE_OP(16)
 __MAKE_OP(32)
 __MAKE_OP(64)
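A worked example of the generated encode/decode pair (the values are plain arithmetic, not from this diff):

	static u32 bitfield_roundtrip(void)
	{
		u32 reg = u32_encode_bits(5, GENMASK(7, 4));	/* reg == 0x50 */

		return u32_get_bits(reg, GENMASK(7, 4));	/* returns 5 */
	}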
include/linux/bitops.h
@@ -2,29 +2,9 @@
 #ifndef _LINUX_BITOPS_H
 #define _LINUX_BITOPS_H
 #include <asm/types.h>
+#include <linux/bits.h>
 
 #ifdef	__KERNEL__
-#define BIT(nr)			(1UL << (nr))
-#define BIT_ULL(nr)		(1ULL << (nr))
-#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE		8
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
-	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
-	(((~0ULL) - (1ULL << (l)) + 1) & \
-	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
-
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
include/linux/bits.h (new file)
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr)			(1UL << (nr))
+#define BIT_ULL(nr)		(1ULL << (nr))
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE		8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) - (1ULL << (l)) + 1) & \
+	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif	/* __LINUX_BITS_H */
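Worked examples for the relocated masks; the values follow directly from the definitions (GENMASK(h, l) sets bits l..h inclusive):

	#define EXAMPLE_NIBBLE_MASK	GENMASK(7, 4)		/* 0xf0 */
	#define EXAMPLE_WIDE_MASK	GENMASK_ULL(39, 21)	/* 0x000000ffffe00000 */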
include/linux/blk-cgroup.h
@@ -35,6 +35,7 @@ enum blkg_rwstat_type {
 	BLKG_RWSTAT_WRITE,
 	BLKG_RWSTAT_SYNC,
 	BLKG_RWSTAT_ASYNC,
+	BLKG_RWSTAT_DISCARD,
 
 	BLKG_RWSTAT_NR,
 	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
@@ -136,6 +137,12 @@ struct blkcg_gq {
 	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
 
 	struct rcu_head			rcu_head;
+
+	atomic_t			use_delay;
+	atomic64_t			delay_nsec;
+	atomic64_t			delay_start;
+	u64				last_delay;
+	int				last_use;
 };
 
 typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -148,6 +155,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
+typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
+				      size_t size);
 
 struct blkcg_policy {
 	int				plid;
@@ -167,6 +176,7 @@ struct blkcg_policy {
 	blkcg_pol_offline_pd_fn		*pd_offline_fn;
 	blkcg_pol_free_pd_fn		*pd_free_fn;
 	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
+	blkcg_pol_stat_pd_fn		*pd_stat_fn;
 };
 
 extern struct blkcg blkcg_root;
@@ -238,6 +248,42 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
 	return css_to_blkcg(task_css(current, io_cgrp_id));
 }
 
+static inline bool blk_cgroup_congested(void)
+{
+	struct cgroup_subsys_state *css;
+	bool ret = false;
+
+	rcu_read_lock();
+	css = kthread_blkcg();
+	if (!css)
+		css = task_css(current, io_cgrp_id);
+	while (css) {
+		if (atomic_read(&css->cgroup->congestion_count)) {
+			ret = true;
+			break;
+		}
+		css = css->parent;
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
+ * @return: true if this bio needs to be submitted with the root blkg context.
+ *
+ * In order to avoid priority inversions we sometimes need to issue a bio as if
+ * it were attached to the root blkg, and then backcharge to the actual owning
+ * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
+ * bio and attach the appropriate blkg to the bio.  Then we call this helper and
+ * if it is true run with the root blkg for that queue and then do any
+ * backcharging to the originating cgroup once the io is complete.
+ */
+static inline bool bio_issue_as_root_blkg(struct bio *bio)
+{
+	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
+}
+
 /**
  * blkcg_parent - get the parent of a blkcg
  * @blkcg: blkcg of interest
@@ -295,6 +341,17 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 	return __blkg_lookup(blkcg, q, false);
 }
 
+/**
+ * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for @q at the root level. See also blkg_lookup().
+ */
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{
+	return q->root_blkg;
+}
+
 /**
  * blkg_to_pdata - get policy private data
  * @blkg: blkg of interest
@@ -355,6 +412,21 @@ static inline void blkg_get(struct blkcg_gq *blkg)
 	atomic_inc(&blkg->refcnt);
 }
 
+/**
+ * blkg_try_get - try and get a blkg reference
+ * @blkg: blkg to get
+ *
+ * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
+ * of freeing this blkg, so we can only use it if the refcnt is not zero.
+ */
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+{
+	if (atomic_inc_not_zero(&blkg->refcnt))
+		return blkg;
+	return NULL;
+}
+
+
 void __blkg_release_rcu(struct rcu_head *rcu);
 
 /**
@@ -589,7 +661,9 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
 {
 	struct percpu_counter *cnt;
 
-	if (op_is_write(op))
+	if (op_is_discard(op))
+		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
+	else if (op_is_write(op))
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
@@ -706,8 +780,14 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 	if (!throtl) {
 		blkg = blkg ?: q->root_blkg;
-		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
-				bio->bi_iter.bi_size);
+		/*
+		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
+		 * is a split bio and we would have already accounted for the
+		 * size of the bio.
+		 */
+		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
+			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
+					bio->bi_iter.bi_size);
 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 	}
 
@@ -715,6 +795,59 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	return !throtl;
 }
 
+static inline void blkcg_use_delay(struct blkcg_gq *blkg)
+{
+	if (atomic_add_return(1, &blkg->use_delay) == 1)
+		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
+}
+
+static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
+{
+	int old = atomic_read(&blkg->use_delay);
+
+	if (old == 0)
+		return 0;
+
+	/*
+	 * We do this song and dance because we can race with somebody else
+	 * adding or removing delay.  If we just did an atomic_dec we'd end up
+	 * negative and we'd already be in trouble.  We need to subtract 1 and
+	 * then check to see if we were the last delay so we can drop the
+	 * congestion count on the cgroup.
+	 */
+	while (old) {
+		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
+		if (cur == old)
+			break;
+		old = cur;
+	}
+
+	if (old == 0)
+		return 0;
+	if (old == 1)
+		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+	return 1;
+}
+
+static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
+{
+	int old = atomic_read(&blkg->use_delay);
+	if (!old)
+		return;
+	/* We only want 1 person clearing the congestion count for this blkg. */
+	while (old) {
+		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
+		if (cur == old) {
+			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+			break;
+		}
+		old = cur;
+	}
+}
+
+void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
+void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_maybe_throttle_current(void);
 #else	/* CONFIG_BLK_CGROUP */
 
 struct blkcg {
@@ -734,9 +867,16 @@ struct blkcg_policy {
 
 #define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
 
+static inline void blkcg_maybe_throttle_current(void) { }
+static inline bool blk_cgroup_congested(void) { return false; }
+
 #ifdef CONFIG_BLOCK
 
+static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
+
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{ return NULL; }
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
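The use_delay counter above pairs exactly one congestion_count contribution per blkg, which is easiest to see from the intended call pattern; a hypothetical policy sketch:

	static void policy_update_sketch(struct blkcg_gq *blkg, bool over_limit)
	{
		if (over_limit)
			blkcg_use_delay(blkg);		/* 0 -> 1 bumps congestion_count */
		else
			blkcg_unuse_delay(blkg);	/* last user drops it again */
	}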
include/linux/blk-mq.h
@@ -35,10 +35,12 @@ struct blk_mq_hw_ctx {
 	struct sbitmap		ctx_map;
 
 	struct blk_mq_ctx	*dispatch_from;
+	unsigned int		dispatch_busy;
 
-	struct blk_mq_ctx	**ctxs;
 	unsigned int		nr_ctx;
+	struct blk_mq_ctx	**ctxs;
 
+	spinlock_t		dispatch_wait_lock;
 	wait_queue_entry_t	dispatch_wait;
 	atomic_t		wait_index;
include/linux/blk_types.h
@@ -179,10 +179,8 @@ struct bio {
 	 */
 	struct io_context	*bi_ioc;
 	struct cgroup_subsys_state *bi_css;
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	void			*bi_cg_private;
+	struct blkcg_gq		*bi_blkg;
 	struct bio_issue	bi_issue;
-#endif
 #endif
 	union {
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -329,7 +327,7 @@ enum req_flag_bits {
 
 	/* for driver use */
 	__REQ_DRV,
-
+	__REQ_SWAP,		/* swapping request. */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -351,6 +349,7 @@ enum req_flag_bits {
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
 
 #define REQ_DRV			(1ULL << __REQ_DRV)
+#define REQ_SWAP		(1ULL << __REQ_SWAP)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -358,6 +357,14 @@ enum req_flag_bits {
 #define REQ_NOMERGE_FLAGS \
 	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
 
+enum stat_group {
+	STAT_READ,
+	STAT_WRITE,
+	STAT_DISCARD,
+
+	NR_STAT_GROUPS
+};
+
 #define bio_op(bio) \
 	((bio)->bi_opf & REQ_OP_MASK)
 #define req_op(req) \
@@ -395,6 +402,18 @@ static inline bool op_is_sync(unsigned int op)
 		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
 }
 
+static inline bool op_is_discard(unsigned int op)
+{
+	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
+}
+
+static inline int op_stat_group(unsigned int op)
+{
+	if (op_is_discard(op))
+		return STAT_DISCARD;
+	return op_is_write(op);
+}
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
 #define BLK_QC_T_SHIFT		16
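A sketch of how op_stat_group() buckets per-op disk statistics, modeled on the generic accounting helpers (the function name here is illustrative):

	static void account_io_sketch(struct hd_struct *part, unsigned int op,
				      unsigned long sectors)
	{
		const int sgrp = op_stat_group(op);	/* STAT_READ/WRITE/DISCARD */
		int cpu = part_stat_lock();

		part_stat_inc(cpu, part, ios[sgrp]);
		part_stat_add(cpu, part, sectors[sgrp], sectors);
		part_stat_unlock();
	}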
include/linux/blkdev.h
@@ -27,8 +27,6 @@
 #include <linux/percpu-refcount.h>
 #include <linux/scatterlist.h>
 #include <linux/blkzoned.h>
-#include <linux/seqlock.h>
-#include <linux/u64_stats_sync.h>
 
 struct module;
 struct scsi_ioctl_command;
@@ -42,7 +40,7 @@ struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
 struct pr_ops;
-struct rq_wb;
+struct rq_qos;
 struct blk_queue_stats;
 struct blk_stat_callback;
 
@@ -442,10 +440,8 @@ struct request_queue {
 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
-	atomic_t		shared_hctx_restart;
-
 	struct blk_queue_stats	*stats;
-	struct rq_wb		*rq_wb;
+	struct rq_qos		*rq_qos;
 
 	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
@@ -592,6 +588,7 @@ struct request_queue {
 
 	struct queue_limits	limits;
 
+#ifdef CONFIG_BLK_DEV_ZONED
 	/*
 	 * Zoned block device information for request dispatch control.
 	 * nr_zones is the total number of zones of the device. This is always
@@ -612,6 +609,7 @@ struct request_queue {
 	unsigned int		nr_zones;
 	unsigned long		*seq_zones_bitmap;
 	unsigned long		*seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
 
 	/*
 	 * sg stuff
@@ -800,11 +798,7 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
 
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-{
-	return q->nr_zones;
-}
-
+#ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_queue_zone_no(struct request_queue *q,
 					     sector_t sector)
 {
@@ -820,6 +814,7 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
 		return false;
 	return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
 }
+#endif /* CONFIG_BLK_DEV_ZONED */
 
 static inline bool rq_is_sync(struct request *rq)
 {
@@ -1070,6 +1065,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_rq_zone_no(struct request *rq)
 {
 	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1079,6 +1075,7 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
 {
 	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
 }
+#endif /* CONFIG_BLK_DEV_ZONED */
 
 /*
  * Some commands like WRITE SAME have a payload or data transfer size which
@@ -1437,8 +1434,6 @@ enum blk_default_limits {
 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
 };
 
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-
 static inline unsigned long queue_segment_boundary(struct request_queue *q)
 {
 	return q->limits.seg_boundary_mask;
@@ -1639,15 +1634,6 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
 	return 0;
 }
 
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (q)
-		return blk_queue_nr_zones(q);
-	return 0;
-}
-
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
@@ -1877,6 +1863,28 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 				bip_next->bip_vec[0].bv_offset);
 }
 
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi:		blk_integrity profile for device
+ * @sectors:	Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device.  Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+						   unsigned int sectors)
+{
+	return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+					       unsigned int sectors)
+{
+	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1950,12 +1958,24 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 	return false;
 }
 
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+						   unsigned int sectors)
+{
+	return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+					       unsigned int sectors)
+{
+	return 0;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
-	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
+	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	unsigned int (*check_events) (struct gendisk *disk,
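A worked example for the new integrity helpers (the profile values are hypothetical):

	/*
	 * With interval_exp = 12 (4096-byte protection intervals) and
	 * tuple_size = 8, a 32-sector (16 KiB) bio gives:
	 *   bio_integrity_intervals() = 32 >> (12 - 9) = 4
	 *   bio_integrity_bytes()     = 4 * 8          = 32 bytes of PI metadata
	 */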
include/linux/bootmem.h
@@ -27,9 +27,20 @@ extern unsigned long max_pfn;
 extern unsigned long long max_possible_pfn;
 
 #ifndef CONFIG_NO_BOOTMEM
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
+/**
+ * struct bootmem_data - per-node information used by the bootmem allocator
+ * @node_min_pfn: the starting physical address of the node's memory
+ * @node_low_pfn: the end physical address of the directly addressable memory
+ * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
+ *		      memory pages (including holes) on the node.
+ * @last_end_off: the offset within the page of the end of the last allocation;
+ *		  if 0, the page used is full
+ * @hint_idx: the PFN of the page used with the last allocation;
+ *	      together with using this with the @last_end_offset field,
+ *	      a test can be made to see if allocations can be merged
+ *	      with the page used for the last allocation rather than
+ *	      using up a full new page.
+ * @list: list entry in the linked list ordered by the memory addresses
  */
 typedef struct bootmem_data {
 	unsigned long node_min_pfn;
include/linux/bpf-cgroup.h
@@ -4,22 +4,46 @@
 
 #include <linux/errno.h>
 #include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/rbtree.h>
 #include <uapi/linux/bpf.h>
 
 struct sock;
 struct sockaddr;
 struct cgroup;
 struct sk_buff;
+struct bpf_map;
+struct bpf_prog;
 struct bpf_sock_ops_kern;
+struct bpf_cgroup_storage;
 
 #ifdef CONFIG_CGROUP_BPF
 
 extern struct static_key_false cgroup_bpf_enabled_key;
 #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
 
+DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_storage_buffer {
+	struct rcu_head rcu;
+	char data[0];
+};
+
+struct bpf_cgroup_storage {
+	struct bpf_storage_buffer *buf;
+	struct bpf_cgroup_storage_map *map;
+	struct bpf_cgroup_storage_key key;
+	struct list_head list;
+	struct rb_node node;
+	struct rcu_head rcu;
+};
+
 struct bpf_prog_list {
 	struct list_head node;
 	struct bpf_prog *prog;
+	struct bpf_cgroup_storage *storage;
 };
 
 struct bpf_prog_array;
@@ -77,6 +101,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
 				      short access, enum bpf_attach_type type);
 
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
+{
+	struct bpf_storage_buffer *buf;
+
+	if (!storage)
+		return;
+
+	buf = READ_ONCE(storage->buf);
+	this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+			     struct cgroup *cgroup,
+			     enum bpf_attach_type type);
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+
 /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
 ({									      \
@@ -221,6 +265,16 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
 	return -EINVAL;
 }
 
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
+static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+					    struct bpf_map *map) { return 0; }
+static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+					      struct bpf_map *map) {}
+static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+	struct bpf_prog *prog) { return 0; }
+static inline void bpf_cgroup_storage_free(
+	struct bpf_cgroup_storage *storage) {}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
 struct bpf_map;
 struct sock;
 struct seq_file;
-struct btf;
+struct btf_type;
 
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
@@ -48,8 +49,9 @@ struct bpf_map_ops {
 	u32 (*map_fd_sys_lookup_elem)(void *ptr);
 	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
 				  struct seq_file *m);
-	int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
-			     u32 key_type_id, u32 value_type_id);
+	int (*map_check_btf)(const struct bpf_map *map,
+			     const struct btf_type *key_type,
+			     const struct btf_type *value_type);
 };
 
 struct bpf_map {
@@ -85,6 +86,7 @@ struct bpf_map {
 	char name[BPF_OBJ_NAME_LEN];
 };
 
+struct bpf_offload_dev;
 struct bpf_offloaded_map;
 
 struct bpf_map_dev_ops {
@@ -117,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
 
 static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
 {
-	return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+	return map->btf && map->ops->map_seq_show_elem;
 }
 
+int map_check_no_btf(const struct bpf_map *map,
+		     const struct btf_type *key_type,
+		     const struct btf_type *value_type);
+
 extern const struct bpf_map_ops bpf_map_offload_ops;
 
 /* function argument constraints */
@@ -154,6 +160,7 @@ enum bpf_arg_type {
 enum bpf_return_type {
 	RET_INTEGER,			/* function returns integer */
 	RET_VOID,			/* function doesn't return anything */
+	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
 	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
 };
 
@@ -281,6 +288,7 @@ struct bpf_prog_aux {
 	struct bpf_prog *prog;
 	struct user_struct *user;
 	u64 load_time; /* ns since boottime */
+	struct bpf_map *cgroup_storage;
 	char name[BPF_OBJ_NAME_LEN];
 #ifdef CONFIG_SECURITY
 	void *security;
@@ -347,12 +355,17 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
  * The 'struct bpf_prog_array *' should only be replaced with xchg()
  * since other cpus are walking the array of pointers in parallel.
  */
-struct bpf_prog_array {
-	struct rcu_head rcu;
-	struct bpf_prog *progs[0];
+struct bpf_prog_array_item {
+	struct bpf_prog *prog;
+	struct bpf_cgroup_storage *cgroup_storage;
+};
+
+struct bpf_prog_array {
+	struct rcu_head rcu;
+	struct bpf_prog_array_item items[0];
 };
 
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
 int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
@@ -370,7 +383,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 
 #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
 	({						\
-		struct bpf_prog **_prog, *__prog;	\
+		struct bpf_prog_array_item *_item;	\
+		struct bpf_prog *_prog;			\
 		struct bpf_prog_array *_array;		\
 		u32 _ret = 1;				\
 		preempt_disable();			\
@@ -378,10 +392,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 		_array = rcu_dereference(array);	\
 		if (unlikely(check_non_null && !_array))\
 			goto _out;			\
-		_prog = _array->progs;			\
-		while ((__prog = READ_ONCE(*_prog))) {	\
-			_ret &= func(__prog, ctx);	\
-			_prog++;			\
+		_item = &_array->items[0];		\
+		while ((_prog = READ_ONCE(_item->prog))) {		\
+			bpf_cgroup_storage_set(_item->cgroup_storage);	\
+			_ret &= func(_prog, ctx);	\
+			_item++;			\
 		}					\
 _out:							\
 		rcu_read_unlock();			\
@@ -434,6 +449,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
@@ -512,6 +529,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
 }
 
 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
 
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -648,7 +666,15 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
 int bpf_map_offload_get_next_key(struct bpf_map *map,
 				 void *key, void *next_key);
 
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *bpf_offload_dev_create(void);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+				    struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+				       struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -749,6 +775,33 @@ static inline void __xsk_map_flush(struct bpf_map *map)
 }
 #endif
 
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+				       void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+				       void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+						     void *key, void *value)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+						     void *key, void *value,
+						     u64 map_flags)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
 /* verifier prototypes for helper functions called from eBPF programs */
|
||||
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
|
||||
extern const struct bpf_func_proto bpf_map_update_elem_proto;
|
||||
@@ -768,6 +821,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
|
||||
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
|
||||
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
|
||||
|
||||
extern const struct bpf_func_proto bpf_get_local_storage_proto;
|
||||
|
||||
/* Shared helpers among cBPF and eBPF. */
|
||||
void bpf_user_rnd_init_once(void);
|
||||
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
||||
|
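The new items[] layout pairs each program with its cgroup storage, which the rewritten __BPF_PROG_RUN_ARRAY sets before every invocation. A minimal open-coded sketch of the same walk (not from the patch; the example_ name is hypothetical):

static u32 example_run_array(struct bpf_prog_array __rcu *array, void *ctx)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog *prog;
	u32 ret = 1;

	preempt_disable();
	rcu_read_lock();
	item = &rcu_dereference(array)->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		/* make the per-prog cgroup storage visible to helpers */
		bpf_cgroup_storage_set(item->cgroup_storage);
		ret &= BPF_PROG_RUN(prog, ctx);
		item++;
	}
	rcu_read_unlock();
	preempt_enable();
	return ret;
}
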
@@ -29,6 +29,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
#endif
#ifdef CONFIG_INET
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
#endif

BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -37,6 +40,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
@@ -57,4 +63,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
#ifdef CONFIG_INET
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif

@@ -45,6 +45,7 @@
#define PHY_ID_BCM7445			0x600d8510

#define PHY_ID_BCM_CYGNUS		0xae025200
#define PHY_ID_BCM_OMEGA		0xae025100

#define PHY_BCM_OUI_MASK		0xfffffc00
#define PHY_BCM_OUI_1			0x00206000

20
include/linux/build-salt.h
Normal file
@@ -0,0 +1,20 @@
#ifndef __BUILD_SALT_H
#define __BUILD_SALT_H

#include <linux/elfnote.h>

#define LINUX_ELFNOTE_BUILD_SALT	0x100

#ifdef __ASSEMBLER__

#define BUILD_SALT \
	ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)

#else

#define BUILD_SALT \
	ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)

#endif

#endif /* __BUILD_SALT_H */

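A minimal usage sketch (not part of the patch): the macro is expanded once at file scope so the object picks up the salt as an ELF note, the way the kernel stamps vmlinux and modules.

#include <linux/build-salt.h>

/* emits a "Linux" ELF note carrying CONFIG_BUILD_SALT into this object */
BUILD_SALT;
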
@@ -143,7 +143,12 @@ u8 can_dlc2len(u8 can_dlc);
/* map the sanitized data length to an appropriate data length code */
u8 can_len2dlc(u8 len);

struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
				    unsigned int txqs, unsigned int rxqs);
#define alloc_candev(sizeof_priv, echo_skb_max) \
	alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
	alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
void free_candev(struct net_device *dev);

/* a candev safe wrapper around netdev_priv */

@@ -13,6 +13,7 @@

#include <linux/fs.h>		/* not really needed, later.. */
#include <linux/list.h>
#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>

struct packet_command
@@ -21,7 +22,7 @@ struct packet_command
	unsigned char		*buffer;
	unsigned int		buflen;
	int			stat;
	struct request_sense	*sense;
	struct scsi_sense_hdr	*sshdr;
	unsigned char		data_direction;
	int			quiet;
	int			timeout;

@@ -438,6 +438,9 @@ struct cgroup {
	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* If there is block congestion on this cgroup. */
	atomic_t congestion_count;

	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};

@@ -553,6 +553,36 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	struct cgroup *ptr;

	if (cgrp->level < ancestor_level)
		return NULL;

	for (ptr = cgrp;
	     ptr && ptr->level > ancestor_level;
	     ptr = cgroup_parent(ptr))
		;

	if (ptr && ptr->level == ancestor_level)
		return ptr;

	return NULL;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested

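An illustrative caller (not from the patch; the example_ name is hypothetical): walk up with cgroup_ancestor() and compare against a known subtree root at level 1.

static bool example_in_subtree(struct cgroup *cgrp, struct cgroup *level1_root)
{
	/* NULL when @cgrp is shallower than the requested level */
	struct cgroup *anc = cgroup_ancestor(cgrp, 1);

	return anc == level1_root;
}
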
@@ -38,6 +38,8 @@
#define CLK_IS_CRITICAL		BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE	BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT	BIT(13)

struct clk;
struct clk_hw;
@@ -66,6 +68,17 @@ struct clk_rate_request {
	struct clk_hw *best_parent_hw;
};

/**
 * struct clk_duty - Structure encoding the duty cycle ratio of a clock
 *
 * @num:	Numerator of the duty cycle ratio
 * @den:	Denominator of the duty cycle ratio
 */
struct clk_duty {
	unsigned int num;
	unsigned int den;
};

/**
 * struct clk_ops -  Callback operations for hardware clocks; these are to
 * be provided by the clock implementation, and will be called by drivers
@@ -169,6 +182,15 @@ struct clk_rate_request {
 *		by the second argument. Valid values for degrees are
 *		0-359. Return 0 on success, otherwise -EERROR.
 *
 * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
 *		of a clock. The returned denominator cannot be 0 and must be
 *		greater than or equal to the numerator.
 *
 * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
 *		the numerator (2nd argument) and denominator (3rd argument).
 *		Argument must be a valid ratio (denominator > 0
 *		and >= numerator). Return 0 on success, otherwise -EERROR.
 *
 * @init:	Perform platform-specific initialization magic.
 *		This is not used by any of the basic clock types.
 *		Please consider other ways of solving initialization problems
@@ -218,6 +240,10 @@ struct clk_ops {
					  unsigned long parent_accuracy);
	int		(*get_phase)(struct clk_hw *hw);
	int		(*set_phase)(struct clk_hw *hw, int degrees);
	int		(*get_duty_cycle)(struct clk_hw *hw,
					  struct clk_duty *duty);
	int		(*set_duty_cycle)(struct clk_hw *hw,
					  struct clk_duty *duty);
	void		(*init)(struct clk_hw *hw);
	void		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};

@@ -141,6 +141,27 @@ int clk_set_phase(struct clk *clk, int degrees);
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns -EERROR.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
@@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk)
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;

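A minimal usage sketch of the new consumer API (not from the patch; the example_ name is hypothetical): request a 1/3 ratio, then read it back scaled to percent.

static int example_third_duty(struct clk *clk)
{
	int ret = clk_set_duty_cycle(clk, 1, 3);

	if (ret)
		return ret;
	/* ~33 for a 1/3 ratio with scale == 100, or a negative errno */
	return clk_get_scaled_duty_cycle(clk, 100);
}
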
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
extern void
clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);

extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);

@@ -115,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t;
struct compat_sel_arg_struct;
struct rusage;

struct compat_itimerspec {
	struct compat_timespec it_interval;
	struct compat_timespec it_value;
};

struct compat_utimbuf {
	compat_time_t		actime;
	compat_time_t		modtime;
@@ -300,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
extern int get_compat_itimerspec64(struct itimerspec64 *its,
			const struct compat_itimerspec __user *uits);
extern int put_compat_itimerspec64(const struct itimerspec64 *its,
			struct compat_itimerspec __user *uits);

struct compat_iovec {
	compat_uptr_t	iov_base;
@@ -1028,6 +1019,17 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
	return ctv;
}

/*
 * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
 * directly. Instead, use one of the functions which work equivalently, such
 * as the kcompat_sys_xyzyyz() functions prototyped below.
 */

int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
			 struct compat_statfs64 __user * buf);
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
			  struct compat_statfs64 __user * buf);

#else /* !CONFIG_COMPAT */

#define is_compat_task() (0)

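A sketch of the intended call pattern (not from the patch; the example_ wrapper is hypothetical): in-kernel code routes through the kcompat_sys_*() equivalent instead of the compat_sys_*() entry point.

static int example_compat_statfs(const char __user *path, compat_size_t sz,
				 struct compat_statfs64 __user *buf)
{
	/* equivalent to compat_sys_statfs64(), but callable from the kernel */
	return kcompat_sys_statfs64(path, sz, buf);
}
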
@@ -17,7 +17,16 @@ struct compat_timeval {
	s32		tv_usec;
};

struct compat_itimerspec {
	struct compat_timespec it_interval;
	struct compat_timespec it_value;
};

extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
extern int get_compat_itimerspec64(struct itimerspec64 *its,
			const struct compat_itimerspec __user *uits);
extern int put_compat_itimerspec64(const struct itimerspec64 *its,
			struct compat_itimerspec __user *uits);

#endif /* _LINUX_COMPAT_TIME_H */

@@ -21,6 +21,7 @@ struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
struct notifier_block;

/*
 * this is what the terminal answers to a ESC-Z or csi0c query.
@@ -220,4 +221,8 @@ static inline bool vgacon_text_force(void) { return false; }

extern void console_init(void);

/* For deferred console takeover */
void dummycon_register_output_notifier(struct notifier_block *nb);
void dummycon_unregister_output_notifier(struct notifier_block *nb);

#endif /* _LINUX_CONSOLE_H */

@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -103,6 +105,7 @@ extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int  cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -115,6 +118,7 @@ static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
static inline int  cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -166,4 +170,23 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

enum cpuhp_smt_control {
	CPU_SMT_ENABLED,
	CPU_SMT_DISABLED,
	CPU_SMT_FORCE_DISABLED,
	CPU_SMT_NOT_SUPPORTED,
};

#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology_early(void);
extern void cpu_smt_check_topology(void);
#else
# define cpu_smt_control	(CPU_SMT_ENABLED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
#endif

#endif /* _LINUX_CPU_H_ */

@@ -164,6 +164,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
	CPUHP_AP_WATCHDOG_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_RCUTREE_ONLINE,
	CPUHP_AP_ONLINE_DYN,

@@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask;
#define cpu_active(cpu)		((cpu) == 0)
#endif

static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= nr_cpumask_bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
	cpu_max_bits_warn(cpu, nr_cpumask_bits);
	return cpu;
}

@@ -154,6 +159,13 @@ static inline unsigned int cpumask_next_and(int n,
	return n+1;
}

static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
					     int start, bool wrap)
{
	/* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
	return (wrap && n == 0);
}

/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
					   unsigned int cpu)

20
include/linux/crc32poly.h
Normal file
@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC32_POLY_H
#define _LINUX_CRC32_POLY_H

/*
 * There are multiple 16-bit CRC polynomials in common use, but this is
 * *the* standard CRC-32 polynomial, first popularized by Ethernet.
 * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
 */
#define CRC32_POLY_LE 0xedb88320
#define CRC32_POLY_BE 0x04c11db7

/*
 * This is the CRC32c polynomial, as outlined by Castagnoli.
 * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
 * x^8+x^6+x^0
 */
#define CRC32C_POLY_LE 0x82F63B78

#endif /* _LINUX_CRC32_POLY_H */

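An illustrative sketch (not from the patch; the example_ name is hypothetical): one bit-at-a-time step of a little-endian CRC-32 using CRC32_POLY_LE, which is simply the bit-reversed form of CRC32_POLY_BE.

static u32 example_crc32_le_step(u32 crc, u32 data_bit)
{
	crc ^= data_bit & 1;
	/* shift right; fold in the reflected polynomial if the lsb was set */
	return (crc & 1) ? (crc >> 1) ^ CRC32_POLY_LE : crc >> 1;
}
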
@@ -65,6 +65,12 @@ extern void groups_free(struct group_info *);

extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
extern int groups_search(const struct group_info *, kgid_t);

extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern bool may_setgroups(void);
extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
@@ -78,12 +84,11 @@ static inline int in_egroup_p(kgid_t grp)
{
	return 1;
}
static inline int groups_search(const struct group_info *group_info, kgid_t grp)
{
	return 1;
}
#endif
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
extern bool may_setgroups(void);
extern void groups_sort(struct group_info *);

/*
 * The security context of a task

@@ -112,6 +112,11 @@
 */
#define CRYPTO_ALG_OPTIONAL_KEY		0x00004000

/*
 * Don't trigger module loading
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * Transform masks and values (for crt_flags).
 */

@@ -227,7 +227,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -271,8 +270,6 @@ extern void d_rehash(struct dentry *);

extern void d_add(struct dentry *, struct inode *);

extern void dentry_update_name_case(struct dentry *, const struct qstr *);

/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);

@@ -90,7 +90,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
 * @num_vf:	Called to find out how many virtual functions a device on this
 *		bus supports.
 * @dma_configure:	Called to setup DMA configuration on a device on
			this bus.
 *			this bus.
 * @pm:		Power management operations of this bus, callback the specific
 *		device driver's pm-ops.
 * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
@@ -384,6 +384,9 @@ int subsys_virtual_register(struct bus_type *subsys,
 * @shutdown_pre: Called at shut-down time before driver shutdown.
 * @ns_type:	Callbacks so sysfs can determine namespaces.
 * @namespace:	Namespace of the device belongs to this class.
 * @get_ownership: Allows class to specify uid/gid of the sysfs directories
 *		for the devices belonging to the class. Usually tied to
 *		device's namespace.
 * @pm:		The default device power management operations of this class.
 * @p:		The private data of the driver core, no one other than the
 *		driver core can touch this.
@@ -413,6 +416,8 @@ struct class {
	const struct kobj_ns_type_operations *ns_type;
	const void *(*namespace)(struct device *dev);

	void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);

	const struct dev_pm_ops *pm;

	struct subsys_private *p;
@@ -784,14 +789,16 @@ enum device_link_state {
 * Device link flags.
 *
 * STATELESS: The core won't track the presence of supplier/consumer drivers.
 * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
 * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
 * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
 * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
 */
#define DL_FLAG_STATELESS	BIT(0)
#define DL_FLAG_AUTOREMOVE	BIT(1)
#define DL_FLAG_PM_RUNTIME	BIT(2)
#define DL_FLAG_RPM_ACTIVE	BIT(3)
#define DL_FLAG_STATELESS		BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
#define DL_FLAG_PM_RUNTIME		BIT(2)
#define DL_FLAG_RPM_ACTIVE		BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)

/**
 * struct device_link - Device link representation.
@@ -886,6 +893,8 @@ struct dev_links_info {
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
 *		hardware supports 64-bit addresses for consistent allocations
 *		such descriptors.
 * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
 *		limit than the device itself supports.
 * @dma_pfn_offset: offset of DMA memory range relatively of RAM
 * @dma_parms:	A low level driver may set these to teach IOMMU code about
 *		segment limitations.
@@ -912,8 +921,6 @@ struct dev_links_info {
 * @offline:	Set after successful invocation of bus type's .offline().
 * @of_node_reused: Set if the device-tree node is shared with an ancestor
 *		device.
 * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
 *		indicates support for a higher limit in the dma_mask field.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
@@ -967,6 +974,7 @@ struct device {
					     not all hardware supports
					     64 bit addresses for consistent
					     allocations such descriptors. */
	u64		bus_dma_mask;	/* upstream dma_mask constraint */
	unsigned long	dma_pfn_offset;

	struct device_dma_parameters *dma_parms;
@@ -1002,7 +1010,6 @@ struct device {
	bool			offline_disabled:1;
	bool			offline:1;
	bool			of_node_reused:1;
	bool			dma_32bit_limit:1;
};

static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1316,6 +1323,7 @@ extern const char *dev_driver_string(const struct device *dev);
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);

#ifdef CONFIG_PRINTK

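A usage sketch for the renamed flag (not from the patch; the example_ name is hypothetical): create a managed link that is torn down automatically when the consumer driver unbinds.

static int example_link_devices(struct device *consumer,
				struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER |
			       DL_FLAG_PM_RUNTIME);
	return link ? 0 : -ENODEV;
}
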
@@ -39,12 +39,12 @@ struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @map_atomic: maps a page from the buffer into kernel address
 * @map_atomic: [optional] maps a page from the buffer into kernel address
 *		space, users may not block until the subsequent unmap call.
 *		This callback must not sleep.
 * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *		  This callback must not sleep.
 * @map: maps a page from the buffer into kernel address space.
 * @map: [optional] maps a page from the buffer into kernel address space.
 * @unmap: [optional] unmaps a page from the buffer.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &device can access the provided &dma_buf. Exporters which support
	 * buffer objects in special locations like VRAM or device-specific
	 * carveout areas should check whether the buffer could be moved to
	 * system memory (or directly accessed by the provided device), and
	 * otherwise need to fail the attach operation.
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of requesting device.
	 */
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
	void *(*map_atomic)(struct dma_buf *, unsigned long);
	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*map)(struct dma_buf *, unsigned long);
	void (*unmap)(struct dma_buf *, unsigned long, void *);

@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

@@ -1,14 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECTION_H
#define _LINUX_DMA_DIRECTION_H
/*
 * These definitions mirror those in pci.h, so they can be used
 * interchangeably with their PCI_ counterparts.
 */

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

#endif

@@ -166,7 +166,8 @@ struct dma_fence_ops {
	 * released when the fence is signalled (through e.g. the interrupt
	 * handler).
	 *
	 * This callback is mandatory.
	 * This callback is optional. If this callback is not present, then the
	 * driver must always have signaling enabled.
	 */
	bool (*enable_signaling)(struct dma_fence *fence);

@@ -190,11 +191,14 @@ struct dma_fence_ops {
	/**
	 * @wait:
	 *
	 * Custom wait implementation, or dma_fence_default_wait.
	 * Custom wait implementation, defaults to dma_fence_default_wait() if
	 * not set.
	 *
	 * Must not be NULL, set to dma_fence_default_wait for default implementation.
	 * the dma_fence_default_wait implementation should work for any fence, as long
	 * as enable_signaling works correctly.
	 * The dma_fence_default_wait implementation should work for any fence, as long
	 * as @enable_signaling works correctly. This hook allows drivers to
	 * have an optimized version for the case where a process context is
	 * already available, e.g. if @enable_signaling for the general case
	 * needs to set up a worker thread.
	 *
	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
	 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -202,7 +206,7 @@ struct dma_fence_ops {
	 * which should be treated as if the fence is signaled. For example a hardware
	 * lockup could be reported like that.
	 *
	 * This callback is mandatory.
	 * This callback is optional.
	 */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);
@@ -217,17 +221,6 @@ struct dma_fence_ops {
	 */
	void (*release)(struct dma_fence *fence);

	/**
	 * @fill_driver_data:
	 *
	 * Callback to fill in free-form debug info.
	 *
	 * Returns amount of bytes filled, or negative error on failure.
	 *
	 * This callback is optional.
	 */
	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);

	/**
	 * @fence_value_str:
	 *
@@ -242,8 +235,9 @@ struct dma_fence_ops {
	 * @timeline_value_str:
	 *
	 * Fills in the current value of the timeline as a string, like the
	 * sequence number. This should match what @fill_driver_data prints for
	 * the most recently signalled fence (assuming no delayed signalling).
	 * sequence number. Note that the specific fence passed to this function
	 * should not matter, drivers should only use it to look up the
	 * corresponding timeline structures.
	 */
	void (*timeline_value_str)(struct dma_fence *fence,
				   char *str, int size);

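With @enable_signaling and @wait now optional, a minimal ops table only needs the two name callbacks. A sketch under that assumption (example_ names are hypothetical):

static const char *example_fence_name(struct dma_fence *fence)
{
	return "example";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name   = example_fence_name,
	.get_timeline_name = example_fence_name,
	/* .enable_signaling and .wait intentionally left unset */
};
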
@@ -538,10 +538,17 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(struct device *dev);
#else
static inline void arch_sync_dma_for_cpu_all(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#endif /* _LINUX_DMA_NONCOHERENT_H */

@@ -9,6 +9,15 @@ enum pxad_chan_prio {
	PXAD_PRIO_LOWEST,
};

/**
 * struct pxad_param - dma channel request parameters
 * @drcmr: requestor line number
 * @prio: minimal mandatory priority of the channel
 *
 * If a requested channel is granted, its priority will be at least @prio,
 * ie. if PXAD_PRIO_LOW is required, the requested channel will be either
 * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
 */
struct pxad_param {
	unsigned int drcmr;
	enum pxad_chan_prio prio;

@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
	void *flush;
} efi_file_handle_t;

typedef struct {
	u64 revision;
	u32 open_volume;
} efi_file_io_interface_32_t;

typedef struct {
	u64 revision;
	u64 open_volume;
} efi_file_io_interface_64_t;

typedef struct _efi_file_io_interface {
	u64 revision;
	int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
					     unsigned long size,
					     bool nonblocking);
extern void efi_find_mirror(void);
#else
static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}

static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {

extern int efi_tpm_eventlog_init(void);

/* Workqueue to queue EFI Runtime Services */
extern struct workqueue_struct *efi_rts_wq;

#endif /* _LINUX_EFI_H */

@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
					   unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)

struct sk_buff **eth_gro_receive(struct sk_buff **head,
				 struct sk_buff *skb);
struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);

/* Reserved Ethernet Addresses per IEEE 802.1Q */

@@ -17,9 +17,12 @@ extern void fput(struct file *);
struct file_operations;
struct vfsmount;
struct dentry;
struct inode;
struct path;
extern struct file *alloc_file(const struct path *, fmode_t mode,
	const struct file_operations *fop);
extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
	const char *, int flags, const struct file_operations *);
extern struct file *alloc_file_clone(struct file *, int flags,
	const struct file_operations *);

static inline void fput_light(struct file *file, int fput_needed)
{
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
extern void put_filp(struct file *);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);

@@ -32,6 +32,7 @@ struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -537,6 +538,20 @@ struct sk_msg_buff {
	struct list_head list;
};

struct bpf_redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	unsigned long map_owner;
	u32 kern_flags;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
@@ -738,6 +753,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);
@@ -765,6 +781,27 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
@@ -798,6 +835,20 @@ void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
		     struct bpf_prog *prog, struct sk_buff *skb,
		     u32 hash)
{
	return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;

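An illustrative driver-side bracket (not from the patch; the example_ name is hypothetical): set the no-direct flag around a frame-return path so xdp_return_frame() skips napi_direct recycling.

static void example_return_frames(void)
{
	xdp_set_return_frame_no_direct();
	/* ... xdp_return_frame() calls happen here ... */
	xdp_clear_return_frame_no_direct();
}
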
@@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE		((__force fmode_t)0x40000)

#define FMODE_OPENED		((__force fmode_t)0x80000)
#define FMODE_CREATED		((__force fmode_t)0x100000)

/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY		((__force fmode_t)0x4000000)

@@ -275,6 +278,7 @@ struct writeback_control;

/*
 * Write life time hint values.
 * Stored in struct inode as u8.
 */
enum rw_hint {
	WRITE_LIFE_NOT_SET	= 0,
@@ -609,8 +613,8 @@ struct inode {
	struct timespec64	i_ctime;
	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
	unsigned short		i_bytes;
	unsigned int		i_blkbits;
	enum rw_hint		i_write_hint;
	u8			i_blkbits;
	u8			i_write_hint;
	blkcnt_t		i_blocks;

#ifdef __NEED_I_SIZE_ORDERED
@@ -684,6 +688,17 @@ static inline int inode_unhashed(struct inode *inode)
	return hlist_unhashed(&inode->i_hash);
}

/*
 * __mark_inode_dirty expects inodes to be hashed. Since we don't
 * want special inodes in the fileset inode space, we make them
 * appear hashed, but do not put on any lists. hlist_del()
 * will work fine and require no locking.
 */
static inline void inode_fake_hash(struct inode *inode)
{
	hlist_add_fake(&inode->i_hash);
}

/*
 * inode->i_mutex nesting subclasses for the lock validator:
 *
@@ -1776,7 +1791,7 @@ struct inode_operations {
	int (*update_time)(struct inode *, struct timespec64 *, int);
	int (*atomic_open)(struct inode *, struct dentry *,
			   struct file *, unsigned open_flag,
			   umode_t create_mode, int *opened);
			   umode_t create_mode);
	int (*tmpfile) (struct inode *, struct dentry *, umode_t);
	int (*set_acl)(struct inode *, struct posix_acl *, int);
} ____cacheline_aligned;
@@ -2014,6 +2029,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 * I_OVL_INUSE		Used by overlayfs to get exclusive ownership on upper
 *			and work dirs among overlayfs mounts.
 *
 * I_CREATING		New object's inode in the middle of setting up.
 *
 * Q: What is the difference between I_WILL_FREE and I_FREEING?
 */
#define I_DIRTY_SYNC		(1 << 0)
@@ -2034,7 +2051,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define __I_DIRTY_TIME_EXPIRED	12
#define I_DIRTY_TIME_EXPIRED	(1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH		(1 << 13)
#define I_OVL_INUSE			(1 << 14)
#define I_OVL_INUSE		(1 << 14)
#define I_CREATING		(1 << 15)

#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2420,7 +2438,10 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
				   const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern struct file *filp_clone_open(struct file *);
static inline struct file *file_clone_open(struct file *file)
{
	return dentry_open(&file->f_path, file->f_flags, file->f_cred);
}
extern int filp_close(struct file *, fl_owner_t id);

extern struct filename *getname_flags(const char __user *, int, int *);
@@ -2428,13 +2449,8 @@ extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);

enum {
	FILE_CREATED = 1,
	FILE_OPENED = 2
};
extern int finish_open(struct file *file, struct dentry *dentry,
			int (*open)(struct inode *, struct file *),
			int *opened);
			int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);

/* fs/ioctl.c */
@@ -2622,8 +2638,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)

extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
				   loff_t lend);
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
					     loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
					loff_t lstart, loff_t lend);
@@ -2918,6 +2932,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);

@@ -11,9 +11,8 @@

/*
 * qoriq ptp registers
 * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
 */
struct qoriq_ptp_registers {
struct ctrl_regs {
	u32 tmr_ctrl;		/* Timer control register */
	u32 tmr_tevent;		/* Timestamp event register */
	u32 tmr_temask;		/* Timer event mask register */
@@ -28,22 +27,47 @@ struct qoriq_ptp_registers {
	u8  res1[4];
	u32 tmroff_h;		/* Timer offset high */
	u32 tmroff_l;		/* Timer offset low */
	u8  res2[8];
};

struct alarm_regs {
	u32 tmr_alarm1_h;	/* Timer alarm 1 high register */
	u32 tmr_alarm1_l;	/* Timer alarm 1 high register */
	u32 tmr_alarm2_h;	/* Timer alarm 2 high register */
	u32 tmr_alarm2_l;	/* Timer alarm 2 high register */
	u8  res3[48];
};

struct fiper_regs {
	u32 tmr_fiper1;		/* Timer fixed period interval */
	u32 tmr_fiper2;		/* Timer fixed period interval */
	u32 tmr_fiper3;		/* Timer fixed period interval */
	u8  res4[20];
};

struct etts_regs {
	u32 tmr_etts1_h;	/* Timestamp of general purpose external trigger */
	u32 tmr_etts1_l;	/* Timestamp of general purpose external trigger */
	u32 tmr_etts2_h;	/* Timestamp of general purpose external trigger */
	u32 tmr_etts2_l;	/* Timestamp of general purpose external trigger */
};

struct qoriq_ptp_registers {
	struct ctrl_regs __iomem *ctrl_regs;
	struct alarm_regs __iomem *alarm_regs;
	struct fiper_regs __iomem *fiper_regs;
	struct etts_regs __iomem *etts_regs;
};

/* Offset definitions for the four register groups */
#define CTRL_REGS_OFFSET	0x0
#define ALARM_REGS_OFFSET	0x40
#define FIPER_REGS_OFFSET	0x80
#define ETTS_REGS_OFFSET	0xa0

#define FMAN_CTRL_REGS_OFFSET	0x80
#define FMAN_ALARM_REGS_OFFSET	0xb8
#define FMAN_FIPER_REGS_OFFSET	0xd0
#define FMAN_ETTS_REGS_OFFSET	0xe0


/* Bit definitions for the TMR_CTRL register */
#define ALM1P			(1<<31)	/* Alarm1 output polarity */
#define ALM2P			(1<<30)	/* Alarm2 output polarity */
@@ -103,12 +127,16 @@ struct qoriq_ptp_registers {


#define DRIVER		"ptp_qoriq"
#define DEFAULT_CKSEL	1
#define N_EXT_TS	2
#define REG_SIZE	sizeof(struct qoriq_ptp_registers)

#define DEFAULT_CKSEL		1
#define DEFAULT_TMR_PRSC	2
#define DEFAULT_FIPER1_PERIOD	1000000000
#define DEFAULT_FIPER2_PERIOD	100000

struct qoriq_ptp {
	struct qoriq_ptp_registers __iomem *regs;
	void __iomem *base;
	struct qoriq_ptp_registers regs;
	spinlock_t lock; /* protects regs */
	struct ptp_clock *clock;
	struct ptp_clock_info caps;

@@ -45,7 +45,7 @@ struct fwnode_endpoint {
struct fwnode_reference_args {
	struct fwnode_handle *fwnode;
	unsigned int nargs;
	unsigned int args[NR_FWNODE_REFERENCE_ARGS];
	u64 args[NR_FWNODE_REFERENCE_ARGS];
};

/**

@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
#include <linux/blk_types.h>

#ifdef CONFIG_BLOCK

@@ -82,10 +83,10 @@ struct partition {
} __attribute__((packed));

struct disk_stats {
	unsigned long sectors[2];	/* READs and WRITEs */
	unsigned long ios[2];
	unsigned long merges[2];
	unsigned long ticks[2];
	unsigned long sectors[NR_STAT_GROUPS];
	unsigned long ios[NR_STAT_GROUPS];
	unsigned long merges[NR_STAT_GROUPS];
	unsigned long ticks[NR_STAT_GROUPS];
	unsigned long io_ticks;
	unsigned long time_in_queue;
};
@@ -353,6 +354,11 @@ static inline void free_part_stats(struct hd_struct *part)

#endif /* CONFIG_SMP */

#define part_stat_read_accum(part, field)				\
	(part_stat_read(part, field[STAT_READ]) +			\
	 part_stat_read(part, field[STAT_WRITE]) +			\
	 part_stat_read(part, field[STAT_DISCARD]))

#define part_stat_add(cpu, part, field, addnd)	do {			\
	__part_stat_add((cpu), (part), field, addnd);			\
	if ((part)->partno)						\

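A usage sketch (not from the patch; the example_ name is hypothetical): total sectors across the READ, WRITE and DISCARD groups via the new accumulator.

static unsigned long example_total_sectors(struct hd_struct *part)
{
	return part_stat_read_accum(part, sectors);
}
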
@@ -14,7 +14,7 @@

#include <linux/errno.h>

/* see Documentation/gpio/gpio-legacy.txt */
/* see Documentation/driver-api/gpio/legacy.rst */

/* make these flag values available regardless of GPIO kconfig options */
#define GPIOF_DIR_OUT	(0 << 0)

15
include/linux/gpio/aspeed.h
Normal file
@@ -0,0 +1,15 @@
#ifndef __GPIO_ASPEED_H
#define __GPIO_ASPEED_H

struct aspeed_gpio_copro_ops {
	int (*request_access)(void *data);
	int (*release_access)(void *data);
};

int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
				u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);


#endif /* __GPIO_ASPEED_H */

@@ -41,11 +41,8 @@ enum gpiod_flags {
	GPIOD_OUT_LOW	= GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
	GPIOD_OUT_HIGH	= GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
			  GPIOD_FLAGS_BIT_DIR_VAL,
	GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
			GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN,
	GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
			GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
			GPIOD_FLAGS_BIT_OPEN_DRAIN,
	GPIOD_OUT_LOW_OPEN_DRAIN  = GPIOD_OUT_LOW  | GPIOD_FLAGS_BIT_OPEN_DRAIN,
	GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
};

#ifdef CONFIG_GPIOLIB
@@ -145,6 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);

int gpiod_to_irq(const struct gpio_desc *desc);
void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);

/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -467,6 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
	return -EINVAL;
}

static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
{
	/* GPIO can never have been requested */
	WARN_ON(1);
}

static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
	return ERR_PTR(-EINVAL);

@@ -201,6 +201,8 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
 * @reg_set: output set register (out=high) for generic GPIO
 * @reg_clr: output clear register (out=low) for generic GPIO
 * @reg_dir: direction setting register for generic GPIO
 * @bgpio_dir_inverted: indicates that the direction register is inverted
 *	(gpiolib private state variable)
 * @bgpio_bits: number of register bits used for a generic GPIO i.e.
 *	<register width> * 8
 * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -267,6 +269,7 @@ struct gpio_chip {
	void __iomem *reg_set;
	void __iomem *reg_clr;
	void __iomem *reg_dir;
	bool bgpio_dir_inverted;
	int bgpio_bits;
	spinlock_t bgpio_lock;
	unsigned long bgpio_data;

include/linux/hwmon.h:

@@ -93,6 +93,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_MIN_ALARM	BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM	BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM	BIT(hwmon_temp_crit_alarm)
#define HWMON_T_LCRIT_ALARM	BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM	BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT		BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET		BIT(hwmon_temp_offset)

@@ -187,12 +188,16 @@ enum hwmon_power_attributes {
	hwmon_power_cap_hyst,
	hwmon_power_cap_max,
	hwmon_power_cap_min,
	hwmon_power_min,
	hwmon_power_max,
	hwmon_power_crit,
	hwmon_power_lcrit,
	hwmon_power_label,
	hwmon_power_alarm,
	hwmon_power_cap_alarm,
	hwmon_power_min_alarm,
	hwmon_power_max_alarm,
	hwmon_power_lcrit_alarm,
	hwmon_power_crit_alarm,
};

@@ -213,12 +218,16 @@ enum hwmon_power_attributes {
#define HWMON_P_CAP_HYST	BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX		BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN		BIT(hwmon_power_cap_min)
#define HWMON_P_MIN		BIT(hwmon_power_min)
#define HWMON_P_MAX		BIT(hwmon_power_max)
#define HWMON_P_LCRIT		BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT		BIT(hwmon_power_crit)
#define HWMON_P_LABEL		BIT(hwmon_power_label)
#define HWMON_P_ALARM		BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM	BIT(hwmon_power_cap_alarm)
#define HWMON_P_MIN_ALARM	BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM	BIT(hwmon_power_max_alarm)
#define HWMON_P_LCRIT_ALARM	BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM	BIT(hwmon_power_crit_alarm)

enum hwmon_energy_attributes {

@@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev,
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);

/**
 * hwmon_is_bad_char - Is the char invalid in a hwmon name
 * @ch: the char to be considered
 *
 * hwmon_is_bad_char() can be used to determine if the given character
 * may not be used in a hwmon name.
 *
 * Returns true if the char is invalid, false otherwise.
 */
static inline bool hwmon_is_bad_char(const char ch)
{
	switch (ch) {
	case '-':
	case '*':
	case ' ':
	case '\t':
	case '\n':
		return true;
	default:
		return false;
	}
}

#endif
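The helper above only classifies characters; a caller still has to do the replacement itself. A minimal sketch of such a caller (hypothetical driver code, not part of this diff; the function name and buffer handling are illustrative):

static void example_sanitize_hwmon_name(char *dst, const char *src, size_t len)
{
	size_t i;

	/* copy src, replacing characters hwmon_is_bad_char() rejects */
	for (i = 0; i + 1 < len && src[i]; i++)
		dst[i] = hwmon_is_bad_char(src[i]) ? '_' : src[i];
	dst[i] = '\0';
}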
include/linux/i2c.h:

@@ -140,9 +140,14 @@ extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
   and probably just as fast.
   Note that we use i2c_adapter here, because you do not need a specific
   smbus adapter to call this function. */
extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
			  unsigned short flags, char read_write, u8 command,
			  int size, union i2c_smbus_data *data);
s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
		   unsigned short flags, char read_write, u8 command,
		   int protocol, union i2c_smbus_data *data);

/* Unlocked flavor */
s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
		     unsigned short flags, char read_write, u8 command,
		     int protocol, union i2c_smbus_data *data);

/* Now follow the 'nice' access routines. These also document the calling
   conventions of i2c_smbus_xfer. */
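Renaming the last-but-one argument from "size" to "protocol" only changes the declaration; callers keep passing the usual I2C_SMBUS_* transaction-type constants there. A sketch under that assumption (the function name is illustrative):

static int example_read_reg(struct i2c_adapter *adap, u16 addr, u8 reg, u8 *val)
{
	union i2c_smbus_data data;
	s32 ret;

	/* one byte-data read; 'protocol' selects the SMBus transaction */
	ret = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &data);
	if (ret < 0)
		return ret;
	*val = data.byte;
	return 0;
}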
29
include/linux/idle_inject.h
Normal file

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Linaro Ltd
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 */
#ifndef __IDLE_INJECT_H__
#define __IDLE_INJECT_H__

/* private idle injection device structure */
struct idle_inject_device;

struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);

void idle_inject_unregister(struct idle_inject_device *ii_dev);

int idle_inject_start(struct idle_inject_device *ii_dev);

void idle_inject_stop(struct idle_inject_device *ii_dev);

void idle_inject_set_duration(struct idle_inject_device *ii_dev,
			      unsigned int run_duration_ms,
			      unsigned int idle_duration_ms);

void idle_inject_get_duration(struct idle_inject_device *ii_dev,
			      unsigned int *run_duration_ms,
			      unsigned int *idle_duration_ms);
#endif /* __IDLE_INJECT_H__ */
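The header itself carries no usage documentation, but the API suggests a simple register/configure/start lifecycle. A sketch (the NULL-on-failure convention and the durations are assumptions, not stated by the header):

static struct idle_inject_device *ii_dev;

static int example_start_injection(struct cpumask *cpus)
{
	ii_dev = idle_inject_register(cpus);
	if (!ii_dev)
		return -ENOMEM;		/* assumed failure convention */

	/* per cycle: let the CPUs run 50 ms, then force 10 ms of idle */
	idle_inject_set_duration(ii_dev, 50, 10);
	return idle_inject_start(ii_dev);
}

Teardown would mirror this with idle_inject_stop() followed by idle_inject_unregister().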
include/linux/ieee80211.h:

@@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation {
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK	0x0800

/*
 * A-PMDU buffer sizes
 * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
 * A-MPDU buffer sizes
 * According to HT size varies from 8 to 64 frames
 * HE adds the ability to have up to 256 frames.
 */
#define IEEE80211_MIN_AMPDU_BUF		0x8
#define IEEE80211_MAX_AMPDU_BUF		0x40
#define IEEE80211_MIN_AMPDU_BUF		0x8
#define IEEE80211_MAX_AMPDU_BUF_HT	0x40
#define IEEE80211_MAX_AMPDU_BUF		0x100


/* Spatial Multiplexing Power Save Modes (for capability) */

@@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation {
	__le16 basic_mcs_set;
} __packed;

/**
 * struct ieee80211_he_cap_elem - HE capabilities element
 *
 * This structure is the "HE capabilities element" fixed fields as
 * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
 */
struct ieee80211_he_cap_elem {
	u8 mac_cap_info[5];
	u8 phy_cap_info[9];
} __packed;

#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN	5

/**
 * enum ieee80211_he_mcs_support - HE MCS support definitions
 * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
 *	number of streams
 * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
 * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
 * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
 *
 * These definitions are used in each 2-bit subfield of the rx_mcs_*
 * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
 * both split into 8 subfields by number of streams. These values indicate
 * which MCSes are supported for the number of streams the value appears
 * for.
 */
enum ieee80211_he_mcs_support {
	IEEE80211_HE_MCS_SUPPORT_0_7	= 0,
	IEEE80211_HE_MCS_SUPPORT_0_9	= 1,
	IEEE80211_HE_MCS_SUPPORT_0_11	= 2,
	IEEE80211_HE_MCS_NOT_SUPPORTED	= 3,
};

/**
 * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
 *
 * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
 * described in P802.11ax_D2.0 section 9.4.2.237.4
 *
 * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
 *	widths less than 80MHz.
 * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
 *	widths less than 80MHz.
 * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
 *	width 160MHz.
 * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
 *	width 160MHz.
 * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
 *	channel width 80p80MHz.
 * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
 *	channel width 80p80MHz.
 */
struct ieee80211_he_mcs_nss_supp {
	__le16 rx_mcs_80;
	__le16 tx_mcs_80;
	__le16 rx_mcs_160;
	__le16 tx_mcs_160;
	__le16 rx_mcs_80p80;
	__le16 tx_mcs_80p80;
} __packed;

/**
 * struct ieee80211_he_operation - HE operation element
 *
 * This structure is the "HE operation element" fields as
 * described in P802.11ax_D2.0 section 9.4.2.238
 */
struct ieee80211_he_operation {
	__le32 he_oper_params;
	__le16 he_mcs_nss_set;
	/* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
	u8 optional[0];
} __packed;

/**
 * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
 *
 * This structure is the "MU AC Parameter Record" fields as
 * described in P802.11ax_D2.0 section 9.4.2.240
 */
struct ieee80211_he_mu_edca_param_ac_rec {
	u8 aifsn;
	u8 ecw_min_max;
	u8 mu_edca_timer;
} __packed;

/**
 * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
 *
 * This structure is the "MU EDCA Parameter Set element" fields as
 * described in P802.11ax_D2.0 section 9.4.2.240
 */
struct ieee80211_mu_edca_param_set {
	u8 mu_qos_info;
	struct ieee80211_he_mu_edca_param_ac_rec ac_be;
	struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
	struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
	struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
} __packed;

/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895	0x00000000

@@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN	0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN	0x20000000

/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE			0x01
#define IEEE80211_HE_MAC_CAP0_TWT_REQ			0x02
#define IEEE80211_HE_MAC_CAP0_TWT_RES			0x04
#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP	0x00
#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1	0x08
#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2	0x10
#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3	0x18
#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK		0x18
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1	0x00
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2	0x20
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4	0x40
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8	0x60
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16	0x80
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32	0xa0
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64	0xc0
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED	0xe0
#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK	0xe0

#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED	0x00
#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128		0x01
#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256		0x02
#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512		0x03
#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK	0x03
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US	0x00
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US	0x04
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US	0x08
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK	0x0c
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1	0x00
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2	0x10
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3	0x20
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4	0x30
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5	0x40
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6	0x50
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7	0x60
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8	0x70
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK	0x70

/* Link adaptation is split between byte HE_MAC_CAP1 and
 * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
 * is set, in which case the following values apply:
 * 0 = No feedback.
 * 1 = reserved.
 * 2 = Unsolicited feedback.
 * 3 = both
 */
#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION		0x80

#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION		0x01
#define IEEE80211_HE_MAC_CAP2_ALL_ACK			0x02
#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED		0x04
#define IEEE80211_HE_MAC_CAP2_BSR			0x08
#define IEEE80211_HE_MAC_CAP2_BCAST_TWT			0x10
#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP		0x20
#define IEEE80211_HE_MAC_CAP2_MU_CASCADING		0x40
#define IEEE80211_HE_MAC_CAP2_ACK_EN			0x80

#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU	0x01
#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL		0x02
#define IEEE80211_HE_MAC_CAP3_OFDMA_RA			0x04

/* The maximum length of an A-MPDU is defined by the combination of the Maximum
 * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
 * same field in the HE capabilities.
 */
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT	0x00
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1		0x08
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2		0x10
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED	0x18
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK		0x18
#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG		0x20
#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED		0x40
#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS	0x80

#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG	0x01
#define IEEE80211_HE_MAC_CAP4_QTP			0x02
#define IEEE80211_HE_MAC_CAP4_BQR			0x04
#define IEEE80211_HE_MAC_CAP4_SR_RESP			0x08
#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP		0x10
#define IEEE80211_HE_MAC_CAP4_OPS			0x20
#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU		0x40

/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_DUAL_BAND				0x01
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G	0x02
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G	0x04
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G	0x08
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G	0x10
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G	0x20
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G	0x40
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK		0xfe

#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ	0x01
#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ	0x02
#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ	0x04
#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ	0x08
#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK		0x0f
#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A			0x10
#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD		0x20
#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US	0x40
/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS		0x80

#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS		0x01
#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US		0x02
#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ		0x04
#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ		0x08
#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX			0x10
#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX			0x20

/* Note that the meaning of UL MU below is different between an AP and a non-AP
 * sta, where in the AP case it indicates support for Rx and in the non-AP sta
 * case it indicates support for Tx.
 */
#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO		0x40
#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO		0x80

#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM		0x00
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK		0x01
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK		0x02
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM		0x03
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK		0x03
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1			0x00
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2			0x04
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM		0x00
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK		0x08
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK		0x10
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM		0x18
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK		0x18
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1			0x00
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2			0x20
#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA	0x40
#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER			0x80

#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE			0x01
#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER			0x02

/* Minimal allowed value of Max STS under 80MHz is 3 */
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4	0x0c
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5	0x10
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6	0x14
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7	0x18
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8	0x1c
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK	0x1c

/* Minimal allowed value of Max STS above 80MHz is 3 */
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4	0x60
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5	0x80
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6	0xa0
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7	0xc0
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8	0xe0
#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK	0xe0

#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1	0x00
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2	0x01
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3	0x02
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4	0x03
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5	0x04
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6	0x05
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7	0x06
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8	0x07
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK	0x07

#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1	0x00
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2	0x08
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3	0x10
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4	0x18
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5	0x20
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6	0x28
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7	0x30
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8	0x38
#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK	0x38

#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK			0x40
#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK			0x80

#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU		0x01
#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU		0x02
#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB		0x04
#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB		0x08
#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB			0x10
#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE		0x20
#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO	0x40
#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT		0x80

#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR			0x01
#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR		0x02
#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI	0x04
#define IEEE80211_HE_PHY_CAP7_MAX_NC_1				0x08
#define IEEE80211_HE_PHY_CAP7_MAX_NC_2				0x10
#define IEEE80211_HE_PHY_CAP7_MAX_NC_3				0x18
#define IEEE80211_HE_PHY_CAP7_MAX_NC_4				0x20
#define IEEE80211_HE_PHY_CAP7_MAX_NC_5				0x28
#define IEEE80211_HE_PHY_CAP7_MAX_NC_6				0x30
#define IEEE80211_HE_PHY_CAP7_MAX_NC_7				0x38
#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK			0x38
#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ		0x40
#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ		0x80

#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI	0x01
#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G	0x02
#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU		0x04
#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU		0x08
#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI	0x10
#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF		0x20

/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS	(3)
#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS	(6)
#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS	(11)
#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK	0x07c0
#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK	0xf800

/* TX/RX HE MCS Support field Highest MCS subfield encoding */
enum ieee80211_he_highest_mcs_supported_subfield_enc {
	HIGHEST_MCS_SUPPORTED_MCS7 = 0,
	HIGHEST_MCS_SUPPORTED_MCS8,
	HIGHEST_MCS_SUPPORTED_MCS9,
	HIGHEST_MCS_SUPPORTED_MCS10,
	HIGHEST_MCS_SUPPORTED_MCS11,
};

/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
static inline u8
ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
{
	u8 count = 4;

	if (he_cap->phy_cap_info[0] &
	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
		count += 4;

	if (he_cap->phy_cap_info[0] &
	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
		count += 4;

	return count;
}
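A worked example of the size calculation (the capability bits here are assumed, purely for illustration): the rx/tx maps for widths up to 80 MHz are always present (4 bytes), and each of the optional 160 MHz and 80+80 MHz pairs adds another 4 bytes.

/* Illustrative only: a device advertising 160 MHz but not 80+80 MHz */
struct ieee80211_he_cap_elem cap = {
	.phy_cap_info = {
		[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
	},
};
/* ieee80211_he_mcs_nss_size(&cap) == 8: 4 bytes (<=80 MHz) + 4 (160 MHz) */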
/* 802.11ax HE PPE Thresholds */
#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS		(1)
#define IEEE80211_PPE_THRES_NSS_POS			(0)
#define IEEE80211_PPE_THRES_NSS_MASK			(7)
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
	(BIT(5) | BIT(6))
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK	0x78
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS	(3)
#define IEEE80211_PPE_THRES_INFO_PPET_SIZE		(3)

/*
 * Calculate 802.11ax HE capabilities IE PPE field size
 * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
 */
static inline u8
ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
{
	u8 n;

	if ((phy_cap_info[6] &
	     IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
		return 0;

	n = hweight8(ppe_thres_hdr &
		     IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
	n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
		   IEEE80211_PPE_THRES_NSS_POS));

	/*
	 * Each pair is 6 bits, and we need to add the 7 "header" bits to the
	 * total size.
	 */
	n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
	n = DIV_ROUND_UP(n, 8);

	return n;
}
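A worked example of the arithmetic (header byte value assumed): with two RU index bits set, hweight8() yields n = 2; an NSS subfield of 1 means two spatial streams, so n = 2 * (1 + 1) = 4 PPET16/PPET8 pairs. Each pair is 2 * 3 = 6 bits, giving 4 * 6 + 7 = 31 bits including the header, and DIV_ROUND_UP(31, 8) returns a 4-byte field.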
/* HE Operation defines */
#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK		0x0000003f
#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK	0x000001c0
#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET	6
#define IEEE80211_HE_OPERATION_TWT_REQUIRED		0x00000200
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK	0x000ffc00
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET	10
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR	0x000100000
#define IEEE80211_HE_OPERATION_VHT_OPER_INFO		0x000200000
#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP		0x10000000
#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR	0x20000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED	0x40000000

/*
 * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
 * @he_oper_ie: byte data of the HE Operations IE, starting from the byte
 *	after the ext ID byte. It is assumed that he_oper_ie has at least
 *	sizeof(struct ieee80211_he_operation) bytes, checked already in
 *	ieee802_11_parse_elems_crc()
 * @return the actual size of the IE data (not including header), or 0 on error
 */
static inline u8
ieee80211_he_oper_size(const u8 *he_oper_ie)
{
	struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
	u8 oper_len = sizeof(struct ieee80211_he_operation);
	u32 he_oper_params;

	/* Make sure the input is not NULL */
	if (!he_oper_ie)
		return 0;

	/* Calc required length */
	he_oper_params = le32_to_cpu(he_oper->he_oper_params);
	if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
		oper_len += 3;
	if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
		oper_len++;

	/* Add the first byte (extension ID) to the total length */
	oper_len++;

	return oper_len;
}
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1

@@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext {
	WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
	WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
	WLAN_EID_EXT_FILS_NONCE = 13,
	WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
	WLAN_EID_EXT_HE_CAPABILITY = 35,
	WLAN_EID_EXT_HE_OPERATION = 36,
	WLAN_EID_EXT_UORA = 37,
	WLAN_EID_EXT_HE_MU_EDCA = 38,
};

/* Action category code */
include/linux/if_team.h:

@@ -74,6 +74,11 @@ struct team_port {
	long mode_priv[0];
};

static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static inline bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;

@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
	return port->linkup && team_port_enabled(port);
}

static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	txable = port ? team_port_txable(port) : false;
	rcu_read_unlock();

	return txable;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
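team_port_dev_txable() exists so callers outside the team driver need not open-code the RCU sequence: rx_handler_data is RCU-protected, so the lookup and any use of the returned port must sit inside one read-side section, which the helper guarantees. A hypothetical caller (name illustrative):

static void example_check_port(struct net_device *port_dev)
{
	/* takes and drops rcu_read_lock() internally */
	if (team_port_dev_txable(port_dev))
		pr_debug("%s can transmit\n", port_dev->name);
}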
include/linux/ima.h:

@@ -11,14 +11,16 @@
#define _LINUX_IMA_H

#include <linux/fs.h>
#include <linux/security.h>
#include <linux/kexec.h>
struct linux_binprm;

#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask, int opened);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
			      enum kernel_read_file_id id);

@@ -34,7 +36,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
	return 0;
}

static inline int ima_file_check(struct file *file, int mask, int opened)
static inline int ima_file_check(struct file *file, int mask)
{
	return 0;
}

@@ -49,6 +51,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
	return 0;
}

static inline int ima_load_data(enum kernel_load_data_id id)
{
	return 0;
}

static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
{
	return 0;
include/linux/inetdevice.h:

@@ -93,6 +93,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)

#define IN_DEV_FORWARD(in_dev)		IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev)		IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_BFORWARD(in_dev)		IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev)		IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev)	IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev)	IN_DEV_ANDCONF((in_dev), \
include/linux/integrity.h:

@@ -44,4 +44,17 @@ static inline void integrity_load_keys(void)
}
#endif /* CONFIG_INTEGRITY */

#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS

extern int integrity_kernel_module_request(char *kmod_name);

#else

static inline int integrity_kernel_module_request(char *kmod_name)
{
	return 0;
}

#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */

#endif /* _LINUX_INTEGRITY_H */
include/linux/iomap.h:

@@ -2,6 +2,9 @@
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/mm.h>
#include <linux/types.h>

struct address_space;

@@ -9,6 +12,7 @@ struct fiemap_extent_info;
struct inode;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

@@ -29,6 +33,7 @@ struct vm_fault;
 */
#define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
#define IOMAP_F_DIRTY		0x02	/* uncommitted metadata */
#define IOMAP_F_BUFFER_HEAD	0x04	/* file system requires buffer heads */

/*
 * Flags that only need to be reported for IOMAP_REPORT requests:

@@ -55,6 +60,16 @@ struct iomap {
	u16			flags;	/* flags for mapping */
	struct block_device	*bdev;	/* block device for I/O */
	struct dax_device	*dax_dev; /* dax_dev for dax operations */
	void			*inline_data;
	void			*private; /* filesystem private */

	/*
	 * Called when finished processing a page in the mapping returned in
	 * this iomap.  At least for now this is only supported in the buffered
	 * write path.
	 */
	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
			  struct page *page, struct iomap *iomap);
};

/*

@@ -86,8 +101,40 @@ struct iomap_ops {
			ssize_t written, unsigned flags, struct iomap *iomap);
};

/*
 * Structure allocated for each page when block size < PAGE_SIZE to track
 * sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_count;
	atomic_t		write_count;
	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
};

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}
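The bitmap tracks uptodate state at 512-byte granularity, i.e. PAGE_SIZE / 512 = 8 bits on a 4 KiB page. A sketch of a per-sector lookup (hypothetical helper, not in this diff; the real code must also serialize against iomap_page changes):

static bool example_sector_uptodate(struct page *page, unsigned int off)
{
	struct iomap_page *iop = to_iomap_page(page);

	/* no iomap_page means the page's state is tracked as one unit */
	return iop ? test_bit(off / 512, iop->uptodate) : PageUptodate(page);
}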
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
		const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
int iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops);
int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
void iomap_invalidatepage(struct page *page, unsigned int offset,
		unsigned int len);
#ifdef CONFIG_MIGRATION
int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode);
#else
#define iomap_migrate_page NULL
#endif
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
include/linux/ipc.h:

@@ -4,7 +4,7 @@

#include <linux/spinlock.h>
#include <linux/uidgid.h>
#include <linux/rhashtable.h>
#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
#include <linux/refcount.h>
include/linux/ipc_namespace.h:

@@ -9,7 +9,7 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/rhashtable-types.h>

struct user_namespace;
include/linux/irqchip/arm-gic-v3.h:

@@ -73,6 +73,7 @@
#define GICD_TYPER_MBIS			(1U << 16)

#define GICD_TYPER_ID_BITS(typer)	((((typer) >> 19) & 0x1f) + 1)
#define GICD_TYPER_NUM_LPIS(typer)	((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer)		((((typer) & 0x1f) + 1) * 32)

#define GICD_IROUTER_SPI_MODE_ONE	(0U << 31)

@@ -576,8 +577,8 @@ struct rdists {
		phys_addr_t	phys_base;
	} __percpu		*rdist;
	struct page		*prop_page;
	int			id_bits;
	u64			flags;
	u32			gicd_typer;
	bool			has_vlpis;
	bool			has_direct_lpi;
};
include/linux/jump_label.h:

@@ -299,12 +299,18 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name
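The new *_RO variants place the key in __ro_after_init, so it can only be flipped before the kernel write-protects that section, i.e. during early boot. A sketch (all names illustrative):

DEFINE_STATIC_KEY_FALSE_RO(example_key);

static int __init example_setup(char *str)
{
	static_branch_enable(&example_key);	/* still writable this early */
	return 0;
}
early_param("example_feature", example_setup);

static void example_fast_path(void)
{
	if (static_branch_unlikely(&example_key))
		;	/* feature-specific work */
}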
include/linux/kernfs.h:

@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/wait.h>

struct file;

@@ -325,12 +326,14 @@ void kernfs_destroy_root(struct kernfs_root *root);

struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid,
					 void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
					    const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid,
					 loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key);

@@ -415,12 +418,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { }

static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
		     umode_t mode, void *priv, const void *ns)
		     umode_t mode, kuid_t uid, kgid_t gid,
		     void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }

static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
		     umode_t mode, loff_t size, const struct kernfs_ops *ops,
		     umode_t mode, kuid_t uid, kgid_t gid,
		     loff_t size, const struct kernfs_ops *ops,
		     void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }

@@ -498,12 +503,15 @@ static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
		  void *priv)
{
	return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
	return kernfs_create_dir_ns(parent, name, mode,
				    GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				    priv, NULL);
}

static inline struct kernfs_node *
kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
		      umode_t mode, loff_t size, const struct kernfs_ops *ops,
		      umode_t mode, kuid_t uid, kgid_t gid,
		      loff_t size, const struct kernfs_ops *ops,
		      void *priv, const void *ns)
{
	struct lock_class_key *key = NULL;

@@ -511,15 +519,17 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = (struct lock_class_key *)&ops->lockdep_key;
#endif
	return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
				    key);
	return __kernfs_create_file(parent, name, mode, uid, gid,
				    size, ops, priv, ns, key);
}

static inline struct kernfs_node *
kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
		   loff_t size, const struct kernfs_ops *ops, void *priv)
{
	return kernfs_create_file_ns(parent, name, mode,
				     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				     size, ops, priv, NULL);
}
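With the new kuid_t/kgid_t arguments a caller can create nodes owned by something other than init's root; the short wrappers simply default to GLOBAL_ROOT_UID/GID. A sketch of the non-default case (user_ns and example_ops are illustrative):

struct kernfs_node *kn;
kuid_t uid = make_kuid(user_ns, 0);	/* owner uid mapped into user_ns */
kgid_t gid = make_kgid(user_ns, 0);

kn = __kernfs_create_file(parent, "example_attr", 0644, uid, gid,
			  0, &example_ops, NULL, NULL, NULL);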
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
include/linux/kobject.h:

@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/uidgid.h>

#define UEVENT_HELPER_PATH_LEN		256
#define UEVENT_NUM_ENVP			32	/* number of env pointers */

@@ -114,6 +115,8 @@ extern struct kobject * __must_check kobject_get_unless_zero(
extern void kobject_put(struct kobject *kobj);

extern const void *kobject_namespace(struct kobject *kobj);
extern void kobject_get_ownership(struct kobject *kobj,
				  kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);

struct kobj_type {

@@ -122,6 +125,7 @@ struct kobj_type {
	struct attribute **default_attrs;
	const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
	const void *(*namespace)(struct kobject *kobj);
	void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};

struct kobj_uevent_env {
include/linux/kprobes.h:

@@ -63,7 +63,6 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
				       unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,

@@ -101,12 +100,6 @@ struct kprobe {
	 */
	kprobe_fault_handler_t fault_handler;

	/*
	 * ... called if breakpoint trap occurs in probe handler.
	 * Return 1 if it handled break, otherwise kernel will see it.
	 */
	kprobe_break_handler_t break_handler;

	/* Saved opcode (which has been replaced with breakpoint) */
	kprobe_opcode_t opcode;

@@ -154,24 +147,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
	return p->flags & KPROBE_FLAG_FTRACE;
}

/*
 * Special probe type that uses setjmp-longjmp type tricks to resume
 * execution at a specified entry with a matching prototype corresponding
 * to the probed function - a trick to enable arguments to become
 * accessible seamlessly by probe handling logic.
 * Note:
 * Because of the way compilers allocate stack space for local variables
 * etc upfront, regardless of sub-scopes within a function, this mirroring
 * principle currently works only for probes placed on function entry points.
 */
struct jprobe {
	struct kprobe kp;
	void *entry;	/* probe handling code to jump to */
};

/* For backward compatibility with old code using JPROBE_ENTRY() */
#define JPROBE_ENTRY(handler)	(handler)

/*
 * Function-return probe -
 * Note:

@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
int longjmp_break_handler(struct kprobe *, struct pt_regs *);
void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);

int register_kretprobe(struct kretprobe *rp);

@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
static inline void jprobe_return(void)
{
}
static inline int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;

@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
	return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
static inline int register_jprobe(struct jprobe *p)
{
	return -ENOSYS;
}
static inline int register_jprobes(struct jprobe **jps, int num)
{
	return -ENOSYS;
}
static inline void unregister_jprobe(struct jprobe *p)
{
}
static inline void unregister_jprobes(struct jprobe **jps, int num)
{
}
static inline int disable_kretprobe(struct kretprobe *rp)
{
	return disable_kprobe(&rp->kp);

@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
	return enable_kprobe(&rp->kp);
}
static inline int disable_jprobe(struct jprobe *jp)
{
	return -ENOSYS;
}
static inline int enable_jprobe(struct jprobe *jp)
{
	return -ENOSYS;
}

#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
include/linux/ktime.h:

@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt)		ns_to_timeval((kt))

/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt)			(kt)
/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
	return kt;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
include/linux/leds.h:

@@ -253,7 +253,7 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
struct led_trigger {
	/* Trigger Properties */
	const char	 *name;
	void		(*activate)(struct led_classdev *led_cdev);
	int		(*activate)(struct led_classdev *led_cdev);
	void		(*deactivate)(struct led_classdev *led_cdev);

	/* LEDs under control by this trigger (for simple triggers) */

@@ -262,8 +262,19 @@ struct led_trigger {

	/* Link to next registered trigger */
	struct list_head  next_trig;

	const struct attribute_group **groups;
};

/*
 * Currently the attributes in struct led_trigger::groups are added directly to
 * the LED device. As this might change in the future, the following
 * macros abstract getting the LED device and its trigger_data from the dev
 * parameter passed to the attribute accessor functions.
 */
#define led_trigger_get_led(dev)	((struct led_classdev *)dev_get_drvdata((dev)))
#define led_trigger_get_drvdata(dev)	(led_get_trigger_data(led_trigger_get_led(dev)))

ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count);
ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,

@@ -288,10 +299,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
				      unsigned long *delay_off,
				      int invert);
extern void led_trigger_set_default(struct led_classdev *led_cdev);
extern void led_trigger_set(struct led_classdev *led_cdev,
			    struct led_trigger *trigger);
extern int led_trigger_set(struct led_classdev *led_cdev,
			   struct led_trigger *trigger);
extern void led_trigger_remove(struct led_classdev *led_cdev);

static inline void led_set_trigger_data(struct led_classdev *led_cdev,
					void *trigger_data)
{
	led_cdev->trigger_data = trigger_data;
}

static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
	return led_cdev->trigger_data;

@@ -315,6 +332,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
extern void led_trigger_rename_static(const char *name,
				      struct led_trigger *trig);

#define module_led_trigger(__led_trigger) \
	module_driver(__led_trigger, led_trigger_register, \
		      led_trigger_unregister)

#else

/* Trigger has no members */

@@ -334,9 +355,14 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
					     unsigned long *delay_off,
					     int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
static inline void led_trigger_set(struct led_classdev *led_cdev,
				   struct led_trigger *trigger) {}
static inline int led_trigger_set(struct led_classdev *led_cdev,
				  struct led_trigger *trigger)
{
	return 0;
}

static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
static inline void led_set_trigger_data(struct led_classdev *led_cdev) {}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
	return NULL;
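With activate() now returning int, a trigger can fail cleanly instead of being left half-initialized. A sketch of a trigger written against the new prototype (the data structure is illustrative):

static int example_trig_activate(struct led_classdev *led_cdev)
{
	struct example_trig_data *data;	/* hypothetical per-LED state */

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;		/* propagated to the trigger core */
	led_set_trigger_data(led_cdev, data);
	return 0;
}

static void example_trig_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_get_trigger_data(led_cdev));
}

static struct led_trigger example_trigger = {
	.name       = "example",
	.activate   = example_trig_activate,
	.deactivate = example_trig_deactivate,
};
module_led_trigger(example_trigger);

The module_led_trigger() macro added in the same hunk replaces the boilerplate module_init/module_exit pair.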
include/linux/libata.h:

@@ -1111,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
			const struct ata_port_info * const * ppi, int n_ports);
extern int ata_slave_link_init(struct ata_port *ap);
extern void ata_host_get(struct ata_host *host);
extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
			     struct scsi_host_template *sht);
include/linux/list.h:

@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
	__list_cut_position(list, head, entry);
}

/**
 * list_cut_before - cut a list into two, before given entry
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *
 * This helper moves the initial part of @head, up to but
 * excluding @entry, from @head to @list. You should pass
 * in @entry an element you know is on @head. @list should
 * be an empty list or a list whose data you do not care
 * about losing.
 * If @entry == @head, all entries on @head are moved to
 * @list.
 */
static inline void list_cut_before(struct list_head *list,
				   struct list_head *head,
				   struct list_head *entry)
{
	if (head->next == entry) {
		INIT_LIST_HEAD(list);
		return;
	}
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry->prev;
	list->prev->next = list;
	head->next = entry;
	entry->prev = head;
}
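A worked example (names illustrative): with head -> A -> B -> C and entry pointing at C, list_cut_before(&tmp, &head, C) leaves A and B on tmp and C on head; the existing list_cut_position() with the same arguments would have moved C as well, since it cuts after the entry.

LIST_HEAD(tmp);

/* move everything queued before 'cursor' onto tmp for batch processing */
list_cut_before(&tmp, &queue, cursor);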
static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
include/linux/lsm_hooks.h:

@@ -576,6 +576,10 @@
 *	userspace to load a kernel module with the given name.
 *	@kmod_name name of the module requested by the kernel
 *	Return 0 if successful.
 * @kernel_load_data:
 *	Load data provided by userspace.
 *	@id kernel load data identifier
 *	Return 0 if permission is granted.
 * @kernel_read_file:
 *	Read a file specified by userspace.
 *	@file contains the file structure pointing to the file being read

@@ -1569,7 +1573,7 @@ union security_list_options {
	int (*file_send_sigiotask)(struct task_struct *tsk,
				   struct fown_struct *fown, int sig);
	int (*file_receive)(struct file *file);
	int (*file_open)(struct file *file, const struct cred *cred);
	int (*file_open)(struct file *file);

	int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
	void (*task_free)(struct task_struct *task);

@@ -1582,6 +1586,7 @@ union security_list_options {
	int (*kernel_act_as)(struct cred *new, u32 secid);
	int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
	int (*kernel_module_request)(char *kmod_name);
	int (*kernel_load_data)(enum kernel_load_data_id id);
	int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
	int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
				     enum kernel_read_file_id id);

@@ -1872,6 +1877,7 @@ struct security_hook_heads {
	struct hlist_head cred_getsecid;
	struct hlist_head kernel_act_as;
	struct hlist_head kernel_create_files_as;
	struct hlist_head kernel_load_data;
	struct hlist_head kernel_read_file;
	struct hlist_head kernel_post_read_file;
	struct hlist_head kernel_module_request;
77
include/linux/mailbox/mtk-cmdq-mailbox.h
Normal file

@@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018 MediaTek Inc.
 *
 */

#ifndef __MTK_CMDQ_MAILBOX_H__
#define __MTK_CMDQ_MAILBOX_H__

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#define CMDQ_INST_SIZE			8 /* instruction is 64-bit */
#define CMDQ_SUBSYS_SHIFT		16
#define CMDQ_OP_CODE_SHIFT		24
#define CMDQ_JUMP_PASS			CMDQ_INST_SIZE

#define CMDQ_WFE_UPDATE			BIT(31)
#define CMDQ_WFE_WAIT			BIT(15)
#define CMDQ_WFE_WAIT_VALUE		0x1

/*
 * CMDQ_CODE_MASK:
 *   set write mask
 *   format: op mask
 * CMDQ_CODE_WRITE:
 *   write value into target register
 *   format: op subsys address value
 * CMDQ_CODE_JUMP:
 *   jump by offset
 *   format: op offset
 * CMDQ_CODE_WFE:
 *   wait for event and clear
 *   it is just clear if no wait
 *   format: [wait]  op event update:1 to_wait:1 wait:1
 *           [clear] op event update:1 to_wait:0 wait:0
 * CMDQ_CODE_EOC:
 *   end of command
 *   format: op irq_flag
 */
enum cmdq_code {
	CMDQ_CODE_MASK = 0x02,
	CMDQ_CODE_WRITE = 0x04,
	CMDQ_CODE_JUMP = 0x10,
	CMDQ_CODE_WFE = 0x20,
	CMDQ_CODE_EOC = 0x40,
};

enum cmdq_cb_status {
	CMDQ_CB_NORMAL = 0,
	CMDQ_CB_ERROR
};

struct cmdq_cb_data {
	enum cmdq_cb_status sta;
	void *data;
};

typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);

struct cmdq_task_cb {
	cmdq_async_flush_cb cb;
	void *data;
};

struct cmdq_pkt {
	void *va_base;
	dma_addr_t pa_base;
	size_t cmd_buf_size; /* command occupied size */
	size_t buf_size; /* real buffer size */
	struct cmdq_task_cb cb;
	struct cmdq_task_cb async_cb;
	void *cl;
};

#endif /* __MTK_CMDQ_MAILBOX_H__ */
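A sketch of a completion callback matching the cmdq_async_flush_cb typedef (the client context type and its completion member are illustrative; only the header's own types are real):

static void example_cmdq_done(struct cmdq_cb_data data)
{
	struct example_ctx *ctx = data.data;	/* set when the cb was armed */

	if (data.sta != CMDQ_CB_NORMAL)
		pr_err("cmdq packet failed\n");
	complete(&ctx->done);			/* assumed struct completion */
}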
include/linux/memblock.h:

@@ -20,31 +20,60 @@
#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;

@@ -72,7 +101,7 @@ void memblock_discard(void);

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
					int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);

@@ -89,19 +118,19 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
enum memblock_flags choose_memblock_flags(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

@@ -239,7 +268,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
/**
 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
 * @i: u64 used as loop variable
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *

@@ -253,13 +281,13 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			       NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
					     enum memblock_flags flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
					       enum memblock_flags flags)
{
	r->flags &= ~flags;
}

@@ -317,10 +345,10 @@ static inline bool memblock_bottom_up(void)

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
					enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
				    phys_addr_t align, phys_addr_t max_addr,
				    int nid, ulong flags);
				    int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,

@@ -367,8 +395,10 @@ phys_addr_t memblock_get_current_limit(void);
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{

@@ -376,8 +406,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn this region
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{

@@ -385,8 +417,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{

@@ -394,8 +428,10 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct membl
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn this region
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
@@ -317,6 +317,9 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
|
||||
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
|
||||
gfp_t gfp_mask, struct mem_cgroup **memcgp,
|
||||
bool compound);
|
||||
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
|
||||
gfp_t gfp_mask, struct mem_cgroup **memcgp,
|
||||
bool compound);
|
||||
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
|
||||
bool lrucare, bool compound);
|
||||
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
|
||||
@@ -789,6 +792,16 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int mem_cgroup_try_charge_delay(struct page *page,
|
||||
struct mm_struct *mm,
|
||||
gfp_t gfp_mask,
|
||||
struct mem_cgroup **memcgp,
|
||||
bool compound)
|
||||
{
|
||||
*memcgp = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void mem_cgroup_commit_charge(struct page *page,
|
||||
struct mem_cgroup *memcg,
|
||||
bool lrucare, bool compound)
|
||||
|
@@ -630,6 +630,7 @@ struct mlx4_caps {
|
||||
u32 vf_caps;
|
||||
bool wol_port[MLX4_MAX_PORTS + 1];
|
||||
struct mlx4_rate_limit_caps rl_caps;
|
||||
u32 health_buffer_addrs;
|
||||
};
|
||||
|
||||
struct mlx4_buf_list {
|
||||
@@ -851,6 +852,12 @@ struct mlx4_vf_dev {
|
||||
u8 n_ports;
|
||||
};
|
||||
|
||||
struct mlx4_fw_crdump {
|
||||
bool snapshot_enable;
|
||||
struct devlink_region *region_crspace;
|
||||
struct devlink_region *region_fw_health;
|
||||
};
|
||||
|
||||
enum mlx4_pci_status {
|
||||
MLX4_PCI_STATUS_DISABLED,
|
||||
MLX4_PCI_STATUS_ENABLED,
|
||||
@@ -871,6 +878,7 @@ struct mlx4_dev_persistent {
|
||||
u8 interface_state;
|
||||
struct mutex pci_status_mutex; /* sync pci state */
|
||||
enum mlx4_pci_status pci_status;
|
||||
struct mlx4_fw_crdump crdump;
|
||||
};
|
||||
|
||||
struct mlx4_dev {
|
||||
|
@@ -332,6 +332,13 @@ enum mlx5_event {
|
||||
|
||||
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
|
||||
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
|
||||
|
||||
MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
|
||||
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
|
||||
};
|
||||
|
||||
enum {
|
||||
@@ -939,9 +946,9 @@ enum {
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
|
||||
MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
|
||||
MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
|
||||
MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
|
||||
MLX5_VPORT_ADMIN_STATE_UP = 0x1,
|
||||
MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@@ -817,6 +817,9 @@ struct mlx5_clock {
|
||||
struct mlx5_pps pps_info;
|
||||
};
|
||||
|
||||
struct mlx5_fw_tracer;
|
||||
struct mlx5_vxlan;
|
||||
|
||||
struct mlx5_core_dev {
|
||||
struct pci_dev *pdev;
|
||||
/* sync pci state */
|
||||
@@ -848,6 +851,7 @@ struct mlx5_core_dev {
|
||||
atomic_t num_qps;
|
||||
u32 issi;
|
||||
struct mlx5e_resources mlx5e_res;
|
||||
struct mlx5_vxlan *vxlan;
|
||||
struct {
|
||||
struct mlx5_rsvd_gids reserved_gids;
|
||||
u32 roce_en;
|
||||
@@ -861,6 +865,7 @@ struct mlx5_core_dev {
|
||||
struct mlx5_clock clock;
|
||||
struct mlx5_ib_clock_info *clock_info;
|
||||
struct page *clock_info_page;
|
||||
struct mlx5_fw_tracer *tracer;
|
||||
};
|
||||
|
||||
struct mlx5_db {
|
||||
|
@@ -178,7 +178,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct mlx5_flow_act *flow_act,
|
||||
struct mlx5_flow_destination *dest,
|
||||
int dest_num);
|
||||
int num_dest);
|
||||
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
|
||||
|
||||
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
|
||||
|
@@ -672,7 +672,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
|
||||
u8 swp[0x1];
|
||||
u8 swp_csum[0x1];
|
||||
u8 swp_lso[0x1];
|
||||
u8 reserved_at_23[0x1b];
|
||||
u8 reserved_at_23[0xd];
|
||||
u8 max_vxlan_udp_ports[0x8];
|
||||
u8 reserved_at_38[0x6];
|
||||
u8 max_geneve_opt_len[0x1];
|
||||
u8 tunnel_stateless_geneve_rx[0x1];
|
||||
|
||||
@@ -1135,7 +1137,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
|
||||
|
||||
u8 general_obj_types[0x40];
|
||||
|
||||
u8 reserved_at_440[0x40];
|
||||
u8 reserved_at_440[0x20];
|
||||
|
||||
u8 reserved_at_460[0x10];
|
||||
u8 max_num_eqs[0x10];
|
||||
|
||||
u8 reserved_at_480[0x3];
|
||||
u8 log_max_l2_table[0x5];
|
||||
@@ -3764,8 +3769,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
|
||||
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
|
||||
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
|
||||
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
|
||||
};
|
||||
|
||||
struct mlx5_ifc_query_vport_state_in_bits {
|
||||
|
@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
|
||||
enum fpga_tls_cmds {
|
||||
CMD_SETUP_STREAM = 0x1001,
|
||||
CMD_TEARDOWN_STREAM = 0x1002,
|
||||
CMD_RESYNC_RX = 0x1003,
|
||||
};
|
||||
|
||||
#define MLX5_TLS_1_2 (0)
|
||||
|
@@ -43,8 +43,6 @@ enum {
|
||||
};
|
||||
|
||||
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
|
||||
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
|
||||
u16 vport);
|
||||
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
|
||||
u16 vport, u8 state);
|
||||
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
|
||||
|
@@ -335,176 +335,183 @@ struct core_state {
|
||||
|
||||
struct kioctx_table;
|
||||
struct mm_struct {
|
||||
struct vm_area_struct *mmap; /* list of VMAs */
|
||||
struct rb_root mm_rb;
|
||||
u32 vmacache_seqnum; /* per-thread vmacache */
|
||||
struct {
|
||||
struct vm_area_struct *mmap; /* list of VMAs */
|
||||
struct rb_root mm_rb;
|
||||
u32 vmacache_seqnum; /* per-thread vmacache */
|
||||
#ifdef CONFIG_MMU
|
||||
unsigned long (*get_unmapped_area) (struct file *filp,
|
||||
unsigned long (*get_unmapped_area) (struct file *filp,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long pgoff, unsigned long flags);
|
||||
#endif
|
||||
unsigned long mmap_base; /* base of mmap area */
|
||||
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
|
||||
unsigned long mmap_base; /* base of mmap area */
|
||||
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
|
||||
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
|
||||
/* Base adresses for compatible mmap() */
|
||||
unsigned long mmap_compat_base;
|
||||
unsigned long mmap_compat_legacy_base;
|
||||
/* Base adresses for compatible mmap() */
|
||||
unsigned long mmap_compat_base;
|
||||
unsigned long mmap_compat_legacy_base;
|
||||
#endif
|
||||
unsigned long task_size; /* size of task vm space */
|
||||
unsigned long highest_vm_end; /* highest vma end address */
|
||||
pgd_t * pgd;
|
||||
unsigned long task_size; /* size of task vm space */
|
||||
unsigned long highest_vm_end; /* highest vma end address */
|
||||
pgd_t * pgd;
|
||||
|
||||
/**
|
||||
* @mm_users: The number of users including userspace.
|
||||
*
|
||||
* Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
|
||||
* to 0 (i.e. when the task exits and there are no other temporary
|
||||
* reference holders), we also release a reference on @mm_count
|
||||
* (which may then free the &struct mm_struct if @mm_count also
|
||||
* drops to 0).
|
||||
*/
|
||||
atomic_t mm_users;
|
||||
/**
|
||||
* @mm_users: The number of users including userspace.
|
||||
*
|
||||
* Use mmget()/mmget_not_zero()/mmput() to modify. When this
|
||||
* drops to 0 (i.e. when the task exits and there are no other
|
||||
* temporary reference holders), we also release a reference on
|
||||
* @mm_count (which may then free the &struct mm_struct if
|
||||
* @mm_count also drops to 0).
|
||||
*/
|
||||
atomic_t mm_users;
|
||||
|
||||
/**
|
||||
* @mm_count: The number of references to &struct mm_struct
|
||||
* (@mm_users count as 1).
|
||||
*
|
||||
* Use mmgrab()/mmdrop() to modify. When this drops to 0, the
|
||||
* &struct mm_struct is freed.
|
||||
*/
|
||||
atomic_t mm_count;
|
||||
/**
|
||||
* @mm_count: The number of references to &struct mm_struct
|
||||
* (@mm_users count as 1).
|
||||
*
|
||||
* Use mmgrab()/mmdrop() to modify. When this drops to 0, the
|
||||
* &struct mm_struct is freed.
|
||||
*/
|
||||
atomic_t mm_count;
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
atomic_long_t pgtables_bytes; /* PTE page table pages */
|
||||
atomic_long_t pgtables_bytes; /* PTE page table pages */
|
||||
#endif
|
||||
int map_count; /* number of VMAs */
|
||||
int map_count; /* number of VMAs */
|
||||
|
||||
spinlock_t page_table_lock; /* Protects page tables and some counters */
|
||||
struct rw_semaphore mmap_sem;
|
||||
spinlock_t page_table_lock; /* Protects page tables and some
|
||||
* counters
|
||||
*/
|
||||
struct rw_semaphore mmap_sem;
|
||||
|
||||
struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
|
||||
* together off init_mm.mmlist, and are protected
|
||||
* by mmlist_lock
|
||||
*/
|
||||
struct list_head mmlist; /* List of maybe swapped mm's. These
|
||||
* are globally strung together off
|
||||
* init_mm.mmlist, and are protected
|
||||
* by mmlist_lock
|
||||
*/
|
||||
|
||||
|
||||
unsigned long hiwater_rss; /* High-watermark of RSS usage */
|
||||
unsigned long hiwater_vm; /* High-water virtual memory usage */
|
||||
unsigned long hiwater_rss; /* High-watermark of RSS usage */
|
||||
unsigned long hiwater_vm; /* High-water virtual memory usage */
|
||||
|
||||
unsigned long total_vm; /* Total pages mapped */
|
||||
unsigned long locked_vm; /* Pages that have PG_mlocked set */
|
||||
unsigned long pinned_vm; /* Refcount permanently increased */
|
||||
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
|
||||
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
|
||||
unsigned long stack_vm; /* VM_STACK */
|
||||
unsigned long def_flags;
|
||||
unsigned long total_vm; /* Total pages mapped */
|
||||
unsigned long locked_vm; /* Pages that have PG_mlocked set */
|
||||
unsigned long pinned_vm; /* Refcount permanently increased */
|
||||
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
|
||||
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
|
||||
unsigned long stack_vm; /* VM_STACK */
|
||||
unsigned long def_flags;
|
||||
|
||||
spinlock_t arg_lock; /* protect the below fields */
|
||||
unsigned long start_code, end_code, start_data, end_data;
|
||||
unsigned long start_brk, brk, start_stack;
|
||||
unsigned long arg_start, arg_end, env_start, env_end;
|
||||
spinlock_t arg_lock; /* protect the below fields */
|
||||
unsigned long start_code, end_code, start_data, end_data;
|
||||
unsigned long start_brk, brk, start_stack;
|
||||
unsigned long arg_start, arg_end, env_start, env_end;
|
||||
|
||||
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
|
||||
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
|
||||
|
||||
/*
|
||||
* Special counters, in some configurations protected by the
|
||||
* page_table_lock, in other configurations by being atomic.
|
||||
*/
|
||||
struct mm_rss_stat rss_stat;
|
||||
/*
|
||||
* Special counters, in some configurations protected by the
|
||||
* page_table_lock, in other configurations by being atomic.
|
||||
*/
|
||||
struct mm_rss_stat rss_stat;
|
||||
|
||||
struct linux_binfmt *binfmt;
|
||||
struct linux_binfmt *binfmt;
|
||||
|
||||
cpumask_var_t cpu_vm_mask_var;
|
||||
/* Architecture-specific MM context */
|
||||
mm_context_t context;
|
||||
|
||||
/* Architecture-specific MM context */
|
||||
mm_context_t context;
|
||||
unsigned long flags; /* Must use atomic bitops to access */
|
||||
|
||||
unsigned long flags; /* Must use atomic bitops to access the bits */
|
||||
|
||||
struct core_state *core_state; /* coredumping support */
|
||||
struct core_state *core_state; /* coredumping support */
|
||||
#ifdef CONFIG_MEMBARRIER
|
||||
atomic_t membarrier_state;
|
||||
atomic_t membarrier_state;
|
||||
#endif
|
||||
#ifdef CONFIG_AIO
|
||||
spinlock_t ioctx_lock;
|
||||
struct kioctx_table __rcu *ioctx_table;
|
||||
spinlock_t ioctx_lock;
|
||||
struct kioctx_table __rcu *ioctx_table;
|
||||
#endif
|
||||
#ifdef CONFIG_MEMCG
|
||||
/*
|
||||
* "owner" points to a task that is regarded as the canonical
|
||||
* user/owner of this mm. All of the following must be true in
|
||||
* order for it to be changed:
|
||||
*
|
||||
* current == mm->owner
|
||||
* current->mm != mm
|
||||
* new_owner->mm == mm
|
||||
* new_owner->alloc_lock is held
|
||||
*/
|
||||
struct task_struct __rcu *owner;
|
||||
/*
|
||||
* "owner" points to a task that is regarded as the canonical
|
||||
* user/owner of this mm. All of the following must be true in
|
||||
* order for it to be changed:
|
||||
*
|
||||
* current == mm->owner
|
||||
* current->mm != mm
|
||||
* new_owner->mm == mm
|
||||
* new_owner->alloc_lock is held
|
||||
*/
|
||||
struct task_struct __rcu *owner;
|
||||
#endif
|
||||
struct user_namespace *user_ns;
|
||||
struct user_namespace *user_ns;
|
||||
|
||||
/* store ref to file /proc/<pid>/exe symlink points to */
|
||||
struct file __rcu *exe_file;
|
||||
/* store ref to file /proc/<pid>/exe symlink points to */
|
||||
struct file __rcu *exe_file;
|
||||
#ifdef CONFIG_MMU_NOTIFIER
|
||||
struct mmu_notifier_mm *mmu_notifier_mm;
|
||||
struct mmu_notifier_mm *mmu_notifier_mm;
|
||||
#endif
|
||||
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
|
||||
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
|
||||
#endif
|
||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||
struct cpumask cpumask_allocation;
|
||||
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
|
||||
#endif
|
||||
#ifdef CONFIG_NUMA_BALANCING
|
||||
/*
|
||||
* numa_next_scan is the next time that the PTEs will be marked
|
||||
* pte_numa. NUMA hinting faults will gather statistics and migrate
|
||||
* pages to new nodes if necessary.
|
||||
*/
|
||||
unsigned long numa_next_scan;
|
||||
/*
|
||||
* numa_next_scan is the next time that the PTEs will be marked
|
||||
* pte_numa. NUMA hinting faults will gather statistics and
|
||||
* migrate pages to new nodes if necessary.
|
||||
*/
|
||||
unsigned long numa_next_scan;
|
||||
|
||||
/* Restart point for scanning and setting pte_numa */
|
||||
unsigned long numa_scan_offset;
|
||||
/* Restart point for scanning and setting pte_numa */
|
||||
unsigned long numa_scan_offset;
|
||||
|
||||
/* numa_scan_seq prevents two threads setting pte_numa */
|
||||
int numa_scan_seq;
|
||||
/* numa_scan_seq prevents two threads setting pte_numa */
|
||||
int numa_scan_seq;
|
||||
#endif
|
||||
/*
|
||||
* An operation with batched TLB flushing is going on. Anything that
|
||||
* can move process memory needs to flush the TLB when moving a
|
||||
* PROT_NONE or PROT_NUMA mapped page.
|
||||
*/
|
||||
atomic_t tlb_flush_pending;
|
||||
/*
|
||||
* An operation with batched TLB flushing is going on. Anything
|
||||
* that can move process memory needs to flush the TLB when
|
||||
* moving a PROT_NONE or PROT_NUMA mapped page.
|
||||
*/
|
||||
atomic_t tlb_flush_pending;
|
||||
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
|
||||
/* See flush_tlb_batched_pending() */
|
||||
bool tlb_flush_batched;
|
||||
/* See flush_tlb_batched_pending() */
|
||||
bool tlb_flush_batched;
|
||||
#endif
|
||||
struct uprobes_state uprobes_state;
|
||||
struct uprobes_state uprobes_state;
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
atomic_long_t hugetlb_usage;
|
||||
atomic_long_t hugetlb_usage;
|
||||
#endif
|
||||
struct work_struct async_put_work;
|
||||
struct work_struct async_put_work;
|
||||
|
||||
#if IS_ENABLED(CONFIG_HMM)
|
||||
/* HMM needs to track a few things per mm */
|
||||
struct hmm *hmm;
|
||||
/* HMM needs to track a few things per mm */
|
||||
struct hmm *hmm;
|
||||
#endif
|
||||
} __randomize_layout;
|
||||
} __randomize_layout;
|
||||
|
||||
/*
|
||||
* The mm_cpumask needs to be at the end of mm_struct, because it
|
||||
* is dynamically sized based on nr_cpu_ids.
|
||||
*/
|
||||
unsigned long cpu_bitmap[];
|
||||
};
|
||||
|
||||
extern struct mm_struct init_mm;
|
||||
|
||||
/* Pointer magic because the dynamic array size confuses some compilers. */
|
||||
static inline void mm_init_cpumask(struct mm_struct *mm)
|
||||
{
|
||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||
mm->cpu_vm_mask_var = &mm->cpumask_allocation;
|
||||
#endif
|
||||
cpumask_clear(mm->cpu_vm_mask_var);
|
||||
unsigned long cpu_bitmap = (unsigned long)mm;
|
||||
|
||||
cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
|
||||
cpumask_clear((struct cpumask *)cpu_bitmap);
|
||||
}
|
||||
|
||||
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
|
||||
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
|
||||
{
|
||||
return mm->cpu_vm_mask_var;
|
||||
return (struct cpumask *)&mm->cpu_bitmap;
|
||||
}
|
||||
|
||||
struct mmu_gather;
|
||||
|
@@ -2,7 +2,7 @@
|
||||
#define __LINUX_MROUTE_BASE_H
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rhashtable-types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/sock.h>
|
||||
@@ -254,6 +254,7 @@ struct mr_table {
|
||||
atomic_t cache_resolve_queue_len;
|
||||
bool mroute_do_assert;
|
||||
bool mroute_do_pim;
|
||||
bool mroute_do_wrvifwhole;
|
||||
int mroute_reg_vif_num;
|
||||
};
|
||||
|
||||
|
@@ -67,9 +67,11 @@ struct mtd_erase_region_info {
|
||||
* @datbuf: data buffer - if NULL only oob data are read/written
|
||||
* @oobbuf: oob data buffer
|
||||
*
|
||||
* Note, it is allowed to read more than one OOB area at one go, but not write.
|
||||
* The interface assumes that the OOB write requests program only one page's
|
||||
* OOB area.
|
||||
* Note, some MTD drivers do not allow you to write more than one OOB area at
|
||||
* one go. If you try to do that on such an MTD device, -EINVAL will be
|
||||
* returned. If you want to make your implementation portable on all kind of MTD
|
||||
* devices you should split the write request into several sub-requests when the
|
||||
* request crosses a page boundary.
|
||||
*/
|
||||
struct mtd_oob_ops {
|
||||
unsigned int mode;
|
||||
|
@@ -21,11 +21,10 @@
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/flashchip.h>
|
||||
#include <linux/mtd/bbm.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct mtd_info;
|
||||
struct nand_flash_dev;
|
||||
struct device_node;
|
||||
|
||||
/* Scan and identify a NAND device */
|
||||
int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
|
||||
@@ -36,17 +35,6 @@ static inline int nand_scan(struct mtd_info *mtd, int max_chips)
|
||||
return nand_scan_with_ids(mtd, max_chips, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Separate phases of nand_scan(), allowing board driver to intervene
|
||||
* and override command or ECC setup according to flash type.
|
||||
*/
|
||||
int nand_scan_ident(struct mtd_info *mtd, int max_chips,
|
||||
struct nand_flash_dev *table);
|
||||
int nand_scan_tail(struct mtd_info *mtd);
|
||||
|
||||
/* Unregister the MTD device and free resources held by the NAND device */
|
||||
void nand_release(struct mtd_info *mtd);
|
||||
|
||||
/* Internal helper for board drivers which need to override command function */
|
||||
void nand_wait_ready(struct mtd_info *mtd);
|
||||
|
||||
@@ -121,6 +109,7 @@ enum nand_ecc_algo {
|
||||
NAND_ECC_UNKNOWN,
|
||||
NAND_ECC_HAMMING,
|
||||
NAND_ECC_BCH,
|
||||
NAND_ECC_RS,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -218,6 +207,12 @@ enum nand_ecc_algo {
|
||||
*/
|
||||
#define NAND_WAIT_TCCS 0x00200000
|
||||
|
||||
/*
|
||||
* Whether the NAND chip is a boot medium. Drivers might use this information
|
||||
* to select ECC algorithms supported by the boot ROM or similar restrictions.
|
||||
*/
|
||||
#define NAND_IS_BOOT_MEDIUM 0x00400000
|
||||
|
||||
/* Options set by nand scan */
|
||||
/* Nand scan has allocated controller struct */
|
||||
#define NAND_CONTROLLER_ALLOC 0x80000000
|
||||
@@ -230,6 +225,17 @@ enum nand_ecc_algo {
|
||||
/* Keep gcc happy */
|
||||
struct nand_chip;
|
||||
|
||||
/* ONFI version bits */
|
||||
#define ONFI_VERSION_1_0 BIT(1)
|
||||
#define ONFI_VERSION_2_0 BIT(2)
|
||||
#define ONFI_VERSION_2_1 BIT(3)
|
||||
#define ONFI_VERSION_2_2 BIT(4)
|
||||
#define ONFI_VERSION_2_3 BIT(5)
|
||||
#define ONFI_VERSION_3_0 BIT(6)
|
||||
#define ONFI_VERSION_3_1 BIT(7)
|
||||
#define ONFI_VERSION_3_2 BIT(8)
|
||||
#define ONFI_VERSION_4_0 BIT(9)
|
||||
|
||||
/* ONFI features */
|
||||
#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
|
||||
#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
|
||||
@@ -470,13 +476,13 @@ struct onfi_params {
|
||||
*/
|
||||
struct nand_parameters {
|
||||
/* Generic parameters */
|
||||
char model[100];
|
||||
const char *model;
|
||||
bool supports_set_get_features;
|
||||
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
|
||||
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
|
||||
|
||||
/* ONFI parameters */
|
||||
struct onfi_params onfi;
|
||||
struct onfi_params *onfi;
|
||||
};
|
||||
|
||||
/* The maximum expected count of bytes in the NAND ID sequence */
|
||||
@@ -493,20 +499,42 @@ struct nand_id {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
|
||||
* struct nand_controller_ops - Controller operations
|
||||
*
|
||||
* @attach_chip: this method is called after the NAND detection phase after
|
||||
* flash ID and MTD fields such as erase size, page size and OOB
|
||||
* size have been set up. ECC requirements are available if
|
||||
* provided by the NAND chip or device tree. Typically used to
|
||||
* choose the appropriate ECC configuration and allocate
|
||||
* associated resources.
|
||||
* This hook is optional.
|
||||
* @detach_chip: free all resources allocated/claimed in
|
||||
* nand_controller_ops->attach_chip().
|
||||
* This hook is optional.
|
||||
*/
|
||||
struct nand_controller_ops {
|
||||
int (*attach_chip)(struct nand_chip *chip);
|
||||
void (*detach_chip)(struct nand_chip *chip);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nand_controller - Structure used to describe a NAND controller
|
||||
*
|
||||
* @lock: protection lock
|
||||
* @active: the mtd device which holds the controller currently
|
||||
* @wq: wait queue to sleep on if a NAND operation is in
|
||||
* progress used instead of the per chip wait queue
|
||||
* when a hw controller is available.
|
||||
* @ops: NAND controller operations.
|
||||
*/
|
||||
struct nand_hw_control {
|
||||
struct nand_controller {
|
||||
spinlock_t lock;
|
||||
struct nand_chip *active;
|
||||
wait_queue_head_t wq;
|
||||
const struct nand_controller_ops *ops;
|
||||
};
|
||||
|
||||
static inline void nand_hw_control_init(struct nand_hw_control *nfc)
|
||||
static inline void nand_controller_init(struct nand_controller *nfc)
|
||||
{
|
||||
nfc->active = NULL;
|
||||
spin_lock_init(&nfc->lock);
|
||||
@@ -778,11 +806,15 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
|
||||
* implementation) if any.
|
||||
* @cleanup: the ->init() function may have allocated resources, ->cleanup()
|
||||
* is here to let vendor specific code release those resources.
|
||||
* @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
|
||||
* page. This is called after the checksum is verified.
|
||||
*/
|
||||
struct nand_manufacturer_ops {
|
||||
void (*detect)(struct nand_chip *chip);
|
||||
int (*init)(struct nand_chip *chip);
|
||||
void (*cleanup)(struct nand_chip *chip);
|
||||
void (*fixup_onfi_param_page)(struct nand_chip *chip,
|
||||
struct nand_onfi_params *p);
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -986,14 +1018,14 @@ struct nand_subop {
|
||||
unsigned int last_instr_end_off;
|
||||
};
|
||||
|
||||
int nand_subop_get_addr_start_off(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
int nand_subop_get_data_start_off(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
int nand_subop_get_data_len(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
|
||||
unsigned int op_id);
|
||||
|
||||
/**
|
||||
* struct nand_op_parser_addr_constraints - Constraints for address instructions
|
||||
@@ -1176,9 +1208,9 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
|
||||
* setting the read-retry mode. Mostly needed for MLC NAND.
|
||||
* @ecc: [BOARDSPECIFIC] ECC control structure
|
||||
* @buf_align: minimum buffer alignment required by a platform
|
||||
* @hwcontrol: platform-specific hardware control structure
|
||||
* @dummy_controller: dummy controller implementation for drivers that can
|
||||
* only control a single chip
|
||||
* @erase: [REPLACEABLE] erase function
|
||||
* @scan_bbt: [REPLACEABLE] function to scan bad block table
|
||||
* @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
|
||||
* data from array to read regs (tR).
|
||||
* @state: [INTERN] the current state of the NAND device
|
||||
@@ -1271,7 +1303,6 @@ struct nand_chip {
|
||||
const struct nand_operation *op,
|
||||
bool check_only);
|
||||
int (*erase)(struct mtd_info *mtd, int page);
|
||||
int (*scan_bbt)(struct mtd_info *mtd);
|
||||
int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
int feature_addr, uint8_t *subfeature_para);
|
||||
int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
@@ -1314,11 +1345,11 @@ struct nand_chip {
|
||||
flstate_t state;
|
||||
|
||||
uint8_t *oob_poi;
|
||||
struct nand_hw_control *controller;
|
||||
struct nand_controller *controller;
|
||||
|
||||
struct nand_ecc_ctrl ecc;
|
||||
unsigned long buf_align;
|
||||
struct nand_hw_control hwcontrol;
|
||||
struct nand_controller dummy_controller;
|
||||
|
||||
uint8_t *bbt;
|
||||
struct nand_bbt_descr *bbt_td;
|
||||
@@ -1517,14 +1548,12 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
|
||||
extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
|
||||
extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
|
||||
|
||||
int nand_default_bbt(struct mtd_info *mtd);
|
||||
int nand_create_bbt(struct nand_chip *chip);
|
||||
int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
|
||||
int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
|
||||
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
|
||||
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
|
||||
int allowbbt);
|
||||
int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
|
||||
size_t *retlen, uint8_t *buf);
|
||||
|
||||
/**
|
||||
* struct platform_nand_chip - chip level device structure
|
||||
@@ -1555,14 +1584,12 @@ struct platform_device;
|
||||
* struct platform_nand_ctrl - controller level device structure
|
||||
* @probe: platform specific function to probe/setup hardware
|
||||
* @remove: platform specific function to remove/teardown hardware
|
||||
* @hwcontrol: platform specific hardware control structure
|
||||
* @dev_ready: platform specific function to read ready/busy pin
|
||||
* @select_chip: platform specific chip select function
|
||||
* @cmd_ctrl: platform specific function for controlling
|
||||
* ALE/CLE/nCE. Also used to write command and address
|
||||
* @write_buf: platform specific function for write buffer
|
||||
* @read_buf: platform specific function for read buffer
|
||||
* @read_byte: platform specific function to read one byte from chip
|
||||
* @priv: private data to transport driver specific settings
|
||||
*
|
||||
* All fields are optional and depend on the hardware driver requirements
|
||||
@@ -1570,13 +1597,11 @@ struct platform_device;
|
||||
struct platform_nand_ctrl {
|
||||
int (*probe)(struct platform_device *pdev);
|
||||
void (*remove)(struct platform_device *pdev);
|
||||
void (*hwcontrol)(struct mtd_info *mtd, int cmd);
|
||||
int (*dev_ready)(struct mtd_info *mtd);
|
||||
void (*select_chip)(struct mtd_info *mtd, int chip);
|
||||
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
|
||||
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
|
||||
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
|
||||
unsigned char (*read_byte)(struct mtd_info *mtd);
|
||||
void *priv;
|
||||
};
|
||||
|
||||
@@ -1593,10 +1618,10 @@ struct platform_nand_data {
|
||||
/* return the supported asynchronous timing mode. */
|
||||
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
|
||||
{
|
||||
if (!chip->parameters.onfi.version)
|
||||
if (!chip->parameters.onfi)
|
||||
return ONFI_TIMING_MODE_UNKNOWN;
|
||||
|
||||
return chip->parameters.onfi.async_timing_mode;
|
||||
return chip->parameters.onfi->async_timing_mode;
|
||||
}
|
||||
|
||||
int onfi_fill_data_interface(struct nand_chip *chip,
|
||||
@@ -1641,14 +1666,8 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
|
||||
void *extraoob, int extraooblen,
|
||||
int threshold);
|
||||
|
||||
int nand_check_ecc_caps(struct nand_chip *chip,
|
||||
const struct nand_ecc_caps *caps, int oobavail);
|
||||
|
||||
int nand_match_ecc_req(struct nand_chip *chip,
|
||||
const struct nand_ecc_caps *caps, int oobavail);
|
||||
|
||||
int nand_maximize_ecc(struct nand_chip *chip,
|
||||
const struct nand_ecc_caps *caps, int oobavail);
|
||||
int nand_ecc_choose_conf(struct nand_chip *chip,
|
||||
const struct nand_ecc_caps *caps, int oobavail);
|
||||
|
||||
/* Default write_oob implementation */
|
||||
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
|
||||
@@ -1674,10 +1693,14 @@ int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
/* Default read_page_raw implementation */
|
||||
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
uint8_t *buf, int oob_required, int page);
|
||||
int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
u8 *buf, int oob_required, int page);
|
||||
|
||||
/* Default write_page_raw implementation */
|
||||
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
const uint8_t *buf, int oob_required, int page);
|
||||
int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
const u8 *buf, int oob_required, int page);
|
||||
|
||||
/* Reset and initialize a NAND device */
|
||||
int nand_reset(struct nand_chip *chip, int chipnr);
|
||||
@@ -1711,8 +1734,13 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
|
||||
int nand_write_data_op(struct nand_chip *chip, const void *buf,
|
||||
unsigned int len, bool force_8bit);
|
||||
|
||||
/* Free resources held by the NAND device */
|
||||
/*
|
||||
* Free resources held by the NAND device, must be called on error after a
|
||||
* sucessful nand_scan().
|
||||
*/
|
||||
void nand_cleanup(struct nand_chip *chip);
|
||||
/* Unregister the MTD device and calls nand_cleanup() */
|
||||
void nand_release(struct mtd_info *mtd);
|
||||
|
||||
/* Default extended ID decoding function */
|
||||
void nand_decode_ext_id(struct nand_chip *chip);
|
||||
|
@@ -235,6 +235,7 @@ enum spi_nor_option_flags {
|
||||
SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
|
||||
SNOR_F_READY_XSR_RDY = BIT(4),
|
||||
SNOR_F_USE_CLSR = BIT(5),
|
||||
SNOR_F_BROKEN_RESET = BIT(6),
|
||||
};
|
||||
|
||||
/**
|
||||
|
421
include/linux/mtd/spinand.h
Normal file
421
include/linux/mtd/spinand.h
Normal file
@@ -0,0 +1,421 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2016-2017 Micron Technology, Inc.
|
||||
*
|
||||
* Authors:
|
||||
* Peter Pan <peterpandong@micron.com>
|
||||
*/
|
||||
#ifndef __LINUX_MTD_SPINAND_H
|
||||
#define __LINUX_MTD_SPINAND_H
|
||||
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/nand.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/spi/spi-mem.h>
|
||||
|
||||
/**
|
||||
* Standard SPI NAND flash operations
|
||||
*/
|
||||
|
||||
#define SPINAND_RESET_OP \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
|
||||
SPI_MEM_OP_NO_ADDR, \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_NO_DATA)
|
||||
|
||||
#define SPINAND_WR_EN_DIS_OP(enable) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
|
||||
SPI_MEM_OP_NO_ADDR, \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_NO_DATA)
|
||||
|
||||
#define SPINAND_READID_OP(ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
|
||||
SPI_MEM_OP_NO_ADDR, \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 1), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 1))
|
||||
|
||||
#define SPINAND_SET_FEATURE_OP(reg, valptr) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
|
||||
SPI_MEM_OP_ADDR(1, reg, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_DATA_OUT(1, valptr, 1))
|
||||
|
||||
#define SPINAND_GET_FEATURE_OP(reg, valptr) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
|
||||
SPI_MEM_OP_ADDR(1, reg, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_DATA_IN(1, valptr, 1))
|
||||
|
||||
#define SPINAND_BLK_ERASE_OP(addr) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
|
||||
SPI_MEM_OP_ADDR(3, addr, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_NO_DATA)
|
||||
|
||||
#define SPINAND_PAGE_READ_OP(addr) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
|
||||
SPI_MEM_OP_ADDR(3, addr, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_NO_DATA)
|
||||
|
||||
#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 1), \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 1), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 1))
|
||||
|
||||
#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 1), \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 1), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 2))
|
||||
|
||||
#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 1), \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 1), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 4))
|
||||
|
||||
#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 2), \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 2), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 2))
|
||||
|
||||
#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 4), \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 4), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 4))
|
||||
|
||||
#define SPINAND_PROG_EXEC_OP(addr) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
|
||||
SPI_MEM_OP_ADDR(3, addr, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_NO_DATA)
|
||||
|
||||
#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_DATA_OUT(len, buf, 1))
|
||||
|
||||
#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
|
||||
SPI_MEM_OP_ADDR(2, addr, 1), \
|
||||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_DATA_OUT(len, buf, 4))
|
||||
|
||||
/**
|
||||
* Standard SPI NAND flash commands
|
||||
*/
|
||||
#define SPINAND_CMD_PROG_LOAD_X4 0x32
|
||||
#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
|
||||
|
||||
/* feature register */
|
||||
#define REG_BLOCK_LOCK 0xa0
|
||||
#define BL_ALL_UNLOCKED 0x00
|
||||
|
||||
/* configuration register */
|
||||
#define REG_CFG 0xb0
|
||||
#define CFG_OTP_ENABLE BIT(6)
|
||||
#define CFG_ECC_ENABLE BIT(4)
|
||||
#define CFG_QUAD_ENABLE BIT(0)
|
||||
|
||||
/* status register */
|
||||
#define REG_STATUS 0xc0
|
||||
#define STATUS_BUSY BIT(0)
|
||||
#define STATUS_ERASE_FAILED BIT(2)
|
||||
#define STATUS_PROG_FAILED BIT(3)
|
||||
#define STATUS_ECC_MASK GENMASK(5, 4)
|
||||
#define STATUS_ECC_NO_BITFLIPS (0 << 4)
|
||||
#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
|
||||
#define STATUS_ECC_UNCOR_ERROR (2 << 4)
|
||||
|
||||
struct spinand_op;
|
||||
struct spinand_device;
|
||||
|
||||
#define SPINAND_MAX_ID_LEN 4
|
||||
|
||||
/**
|
||||
* struct spinand_id - SPI NAND id structure
|
||||
* @data: buffer containing the id bytes. Currently 4 bytes large, but can
|
||||
* be extended if required
|
||||
* @len: ID length
|
||||
*
|
||||
* struct_spinand_id->data contains all bytes returned after a READ_ID command,
|
||||
* including dummy bytes if the chip does not emit ID bytes right after the
|
||||
* READ_ID command. The responsibility to extract real ID bytes is left to
|
||||
* struct_manufacurer_ops->detect().
|
||||
*/
|
||||
struct spinand_id {
|
||||
u8 data[SPINAND_MAX_ID_LEN];
|
||||
int len;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct manufacurer_ops - SPI NAND manufacturer specific operations
|
||||
* @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
|
||||
* the core calls the struct_manufacurer_ops->detect() hook of each
|
||||
* registered manufacturer until one of them return 1. Note that
|
||||
* the first thing to check in this hook is that the manufacturer ID
|
||||
* in struct_spinand_device->id matches the manufacturer whose
|
||||
* ->detect() hook has been called. Should return 1 if there's a
|
||||
* match, 0 if the manufacturer ID does not match and a negative
|
||||
* error code otherwise. When true is returned, the core assumes
|
||||
* that properties of the NAND chip (spinand->base.memorg and
|
||||
* spinand->base.eccreq) have been filled
|
||||
* @init: initialize a SPI NAND device
|
||||
* @cleanup: cleanup a SPI NAND device
|
||||
*
|
||||
* Each SPI NAND manufacturer driver should implement this interface so that
|
||||
* NAND chips coming from this vendor can be detected and initialized properly.
|
||||
*/
|
||||
struct spinand_manufacturer_ops {
|
||||
int (*detect)(struct spinand_device *spinand);
|
||||
int (*init)(struct spinand_device *spinand);
|
||||
void (*cleanup)(struct spinand_device *spinand);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct spinand_manufacturer - SPI NAND manufacturer instance
|
||||
* @id: manufacturer ID
|
||||
* @name: manufacturer name
|
||||
* @ops: manufacturer operations
|
||||
*/
|
||||
struct spinand_manufacturer {
|
||||
u8 id;
|
||||
char *name;
|
||||
const struct spinand_manufacturer_ops *ops;
|
||||
};
|
||||
|
||||
/* SPI NAND manufacturers */
|
||||
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
|
||||
extern const struct spinand_manufacturer micron_spinand_manufacturer;
|
||||
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
|
||||
|
||||
/**
|
||||
* struct spinand_op_variants - SPI NAND operation variants
|
||||
* @ops: the list of variants for a given operation
|
||||
* @nops: the number of variants
|
||||
*
|
||||
* Some operations like read-from-cache/write-to-cache have several variants
|
||||
* depending on the number of IO lines you use to transfer data or address
|
||||
* cycles. This structure is a way to describe the different variants supported
|
||||
* by a chip and let the core pick the best one based on the SPI mem controller
|
||||
* capabilities.
|
||||
*/
|
||||
struct spinand_op_variants {
|
||||
const struct spi_mem_op *ops;
|
||||
unsigned int nops;
|
||||
};
|
||||
|
||||
#define SPINAND_OP_VARIANTS(name, ...) \
|
||||
const struct spinand_op_variants name = { \
|
||||
.ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
|
||||
.nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
|
||||
sizeof(struct spi_mem_op), \
|
||||
}
|
||||
|
||||
/**
|
||||
* spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
|
||||
* chip
|
||||
* @get_status: get the ECC status. Should return a positive number encoding
|
||||
* the number of corrected bitflips if correction was possible or
|
||||
* -EBADMSG if there are uncorrectable errors. I can also return
|
||||
* other negative error codes if the error is not caused by
|
||||
* uncorrectable bitflips
|
||||
* @ooblayout: the OOB layout used by the on-die ECC implementation
|
||||
*/
|
||||
struct spinand_ecc_info {
|
||||
int (*get_status)(struct spinand_device *spinand, u8 status);
|
||||
const struct mtd_ooblayout_ops *ooblayout;
|
||||
};
|
||||
|
||||
#define SPINAND_HAS_QE_BIT BIT(0)
|
||||
|
||||
/**
|
||||
* struct spinand_info - Structure used to describe SPI NAND chips
|
||||
* @model: model name
|
||||
* @devid: device ID
|
||||
* @flags: OR-ing of the SPINAND_XXX flags
|
||||
* @memorg: memory organization
|
||||
* @eccreq: ECC requirements
|
||||
* @eccinfo: on-die ECC info
|
||||
* @op_variants: operations variants
|
||||
* @op_variants.read_cache: variants of the read-cache operation
|
||||
* @op_variants.write_cache: variants of the write-cache operation
|
||||
* @op_variants.update_cache: variants of the update-cache operation
|
||||
* @select_target: function used to select a target/die. Required only for
|
||||
* multi-die chips
|
||||
*
|
||||
* Each SPI NAND manufacturer driver should have a spinand_info table
|
||||
* describing all the chips supported by the driver.
|
||||
*/
|
||||
struct spinand_info {
|
||||
const char *model;
|
||||
u8 devid;
|
||||
u32 flags;
|
||||
struct nand_memory_organization memorg;
|
||||
struct nand_ecc_req eccreq;
|
||||
struct spinand_ecc_info eccinfo;
|
||||
struct {
|
||||
const struct spinand_op_variants *read_cache;
|
||||
const struct spinand_op_variants *write_cache;
|
||||
const struct spinand_op_variants *update_cache;
|
||||
} op_variants;
|
||||
int (*select_target)(struct spinand_device *spinand,
|
||||
unsigned int target);
|
||||
};
|
||||
|
||||
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
|
||||
{ \
|
||||
.read_cache = __read, \
|
||||
.write_cache = __write, \
|
||||
.update_cache = __update, \
|
||||
}
|
||||
|
||||
#define SPINAND_ECCINFO(__ooblayout, __get_status) \
|
||||
.eccinfo = { \
|
||||
.ooblayout = __ooblayout, \
|
||||
.get_status = __get_status, \
|
||||
}
|
||||
|
||||
#define SPINAND_SELECT_TARGET(__func) \
|
||||
.select_target = __func,
|
||||
|
||||
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
|
||||
__flags, ...) \
|
||||
{ \
|
||||
.model = __model, \
|
||||
.devid = __id, \
|
||||
.memorg = __memorg, \
|
||||
.eccreq = __eccreq, \
|
||||
.op_variants = __op_variants, \
|
||||
.flags = __flags, \
|
||||
__VA_ARGS__ \
|
||||
}
|
||||
|
||||
/**
|
||||
* struct spinand_device - SPI NAND device instance
|
||||
* @base: NAND device instance
|
||||
* @spimem: pointer to the SPI mem object
|
||||
* @lock: lock used to serialize accesses to the NAND
|
||||
* @id: NAND ID as returned by READ_ID
|
||||
* @flags: NAND flags
|
||||
* @op_templates: various SPI mem op templates
|
||||
* @op_templates.read_cache: read cache op template
|
||||
* @op_templates.write_cache: write cache op template
|
||||
* @op_templates.update_cache: update cache op template
|
||||
* @select_target: select a specific target/die. Usually called before sending
|
||||
* a command addressing a page or an eraseblock embedded in
|
||||
* this die. Only required if your chip exposes several dies
|
||||
* @cur_target: currently selected target/die
|
||||
* @eccinfo: on-die ECC information
|
||||
* @cfg_cache: config register cache. One entry per die
|
||||
* @databuf: bounce buffer for data
|
||||
* @oobbuf: bounce buffer for OOB data
|
||||
* @scratchbuf: buffer used for everything but page accesses. This is needed
|
||||
* because the spi-mem interface explicitly requests that buffers
|
||||
* passed in spi_mem_op be DMA-able, so we can't based the bufs on
|
||||
* the stack
|
||||
* @manufacturer: SPI NAND manufacturer information
|
||||
* @priv: manufacturer private data
|
||||
*/
|
||||
struct spinand_device {
|
||||
struct nand_device base;
|
||||
struct spi_mem *spimem;
|
||||
struct mutex lock;
|
||||
struct spinand_id id;
|
||||
u32 flags;
|
||||
|
||||
struct {
|
||||
const struct spi_mem_op *read_cache;
|
||||
const struct spi_mem_op *write_cache;
|
||||
const struct spi_mem_op *update_cache;
|
||||
} op_templates;
|
||||
|
||||
int (*select_target)(struct spinand_device *spinand,
|
||||
unsigned int target);
|
||||
unsigned int cur_target;
|
||||
|
||||
struct spinand_ecc_info eccinfo;
|
||||
|
||||
u8 *cfg_cache;
|
||||
u8 *databuf;
|
||||
u8 *oobbuf;
|
||||
u8 *scratchbuf;
|
||||
const struct spinand_manufacturer *manufacturer;
|
||||
void *priv;
|
||||
};
|
||||
|
||||
/**
|
||||
* mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
|
||||
* @mtd: MTD instance
|
||||
*
|
||||
* Return: the SPI NAND device attached to @mtd.
|
||||
*/
|
||||
static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
|
||||
{
|
||||
return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
|
||||
}
|
||||
|
||||
/**
|
||||
* spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
|
||||
* @spinand: SPI NAND device
|
||||
*
|
||||
* Return: the MTD device embedded in @spinand.
|
||||
*/
|
||||
static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
|
||||
{
|
||||
return nanddev_to_mtd(&spinand->base);
|
||||
}
|
||||
|
||||
/**
|
||||
* nand_to_spinand() - Get the SPI NAND device embedding an NAND object
|
||||
* @nand: NAND object
|
||||
*
|
||||
* Return: the SPI NAND device embedding @nand.
|
||||
*/
|
||||
static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
|
||||
{
|
||||
return container_of(nand, struct spinand_device, base);
|
||||
}
|
||||
|
||||
/**
|
||||
* spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
|
||||
* @spinand: SPI NAND device
|
||||
*
|
||||
* Return: the NAND device embedded in @spinand.
|
||||
*/
|
||||
static inline struct nand_device *
|
||||
spinand_to_nand(struct spinand_device *spinand)
|
||||
{
|
||||
return &spinand->base;
|
||||
}
|
||||
|
||||
/**
|
||||
* spinand_set_of_node - Attach a DT node to a SPI NAND device
|
||||
* @spinand: SPI NAND device
|
||||
* @np: DT node
|
||||
*
|
||||
* Attach a DT node to a SPI NAND device.
|
||||
*/
|
||||
static inline void spinand_set_of_node(struct spinand_device *spinand,
|
||||
struct device_node *np)
|
||||
{
|
||||
nanddev_set_of_node(&spinand->base, np);
|
||||
}
|
||||
|
||||
int spinand_match_and_init(struct spinand_device *dev,
|
||||
const struct spinand_info *table,
|
||||
unsigned int table_size, u8 devid);
|
||||
|
||||
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
|
||||
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
|
||||
|
||||
#endif /* __LINUX_MTD_SPINAND_H */
|
@@ -114,7 +114,7 @@ struct socket {
|
||||
|
||||
unsigned long flags;
|
||||
|
||||
struct socket_wq __rcu *wq;
|
||||
struct socket_wq *wq;
|
||||
|
||||
struct file *file;
|
||||
struct sock *sk;
|
||||
|
@@ -79,6 +79,7 @@ enum {
|
||||
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
|
||||
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
|
||||
NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
|
||||
NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
|
||||
|
||||
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
|
||||
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
|
||||
@@ -151,6 +152,7 @@ enum {
|
||||
#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
|
||||
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
|
||||
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
|
||||
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
|
||||
|
||||
#define for_each_netdev_feature(mask_addr, bit) \
|
||||
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
|
||||
|
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
|
||||
|
||||
int __init netdev_boot_setup(char *str);
|
||||
|
||||
struct gro_list {
|
||||
struct list_head list;
|
||||
int count;
|
||||
};
|
||||
|
||||
/*
|
||||
* size of gro hash buckets, must less than bit number of
|
||||
* napi_struct::gro_bitmask
|
||||
*/
|
||||
#define GRO_HASH_BUCKETS 8
|
||||
|
||||
/*
|
||||
* Structure for NAPI scheduling similar to tasklet but with weighting
|
||||
*/
|
||||
@@ -316,13 +327,13 @@ struct napi_struct {
|
||||
|
||||
unsigned long state;
|
||||
int weight;
|
||||
unsigned int gro_count;
|
||||
unsigned long gro_bitmask;
|
||||
int (*poll)(struct napi_struct *, int);
|
||||
#ifdef CONFIG_NETPOLL
|
||||
int poll_owner;
|
||||
#endif
|
||||
struct net_device *dev;
|
||||
struct sk_buff *gro_list;
|
||||
struct gro_list gro_hash[GRO_HASH_BUCKETS];
|
||||
struct sk_buff *skb;
|
||||
struct hrtimer timer;
|
||||
struct list_head dev_list;
|
||||
@@ -569,6 +580,9 @@ struct netdev_queue {
|
||||
* (/sys/class/net/DEV/Q/trans_timeout)
|
||||
*/
|
||||
unsigned long trans_timeout;
|
||||
|
||||
/* Subordinate device that the queue has been assigned to */
|
||||
struct net_device *sb_dev;
|
||||
/*
|
||||
* write-mostly part
|
||||
*/
|
||||
@@ -730,10 +744,15 @@ struct xps_map {
|
||||
*/
|
||||
struct xps_dev_maps {
|
||||
struct rcu_head rcu;
|
||||
struct xps_map __rcu *cpu_map[0];
|
||||
struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
|
||||
};
|
||||
#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
|
||||
|
||||
#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
|
||||
(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
|
||||
|
||||
#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
|
||||
(_rxqs * (_tcs) * sizeof(struct xps_map *)))
|
||||
|
||||
#endif /* CONFIG_XPS */
|
||||
|
||||
#define TC_MAX_QUEUE 16
|
||||
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
|
||||
}
|
||||
|
||||
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
|
||||
struct sk_buff *skb);
|
||||
struct sk_buff *skb,
|
||||
struct net_device *sb_dev);
|
||||
|
||||
enum tc_setup_type {
|
||||
TC_SETUP_QDISC_MQPRIO,
|
||||
@@ -792,6 +812,7 @@ enum tc_setup_type {
|
||||
TC_SETUP_QDISC_RED,
|
||||
TC_SETUP_QDISC_PRIO,
|
||||
TC_SETUP_QDISC_MQ,
|
||||
TC_SETUP_QDISC_ETF,
|
||||
};
|
||||
|
||||
/* These structures hold the attributes of bpf state that are being passed
|
||||
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
|
||||
*/
|
||||
XDP_SETUP_PROG,
|
||||
XDP_SETUP_PROG_HW,
|
||||
/* Check if a bpf program is set on the device. The callee should
|
||||
* set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
|
||||
* is equivalent to XDP_ATTACHED_DRV.
|
||||
*/
|
||||
XDP_QUERY_PROG,
|
||||
XDP_QUERY_PROG_HW,
|
||||
/* BPF program for offload callbacks, invoked at program load time. */
|
||||
BPF_OFFLOAD_VERIFIER_PREP,
|
||||
BPF_OFFLOAD_TRANSLATE,
|
||||
@@ -835,9 +853,8 @@ struct netdev_bpf {
|
||||
struct bpf_prog *prog;
|
||||
struct netlink_ext_ack *extack;
|
||||
};
|
||||
/* XDP_QUERY_PROG */
|
||||
/* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
|
||||
struct {
|
||||
u8 prog_attached;
|
||||
u32 prog_id;
|
||||
/* flags with which program was installed */
|
||||
u32 prog_flags;
|
||||
@@ -855,10 +872,10 @@ struct netdev_bpf {
|
||||
struct {
|
||||
struct bpf_offloaded_map *offmap;
|
||||
};
|
||||
/* XDP_SETUP_XSK_UMEM */
|
||||
/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
|
||||
struct {
|
||||
struct xdp_umem *umem;
|
||||
u16 queue_id;
|
||||
struct xdp_umem *umem; /* out for query*/
|
||||
u16 queue_id; /* in for query */
|
||||
} xsk;
|
||||
};
|
||||
};
|
||||
@@ -891,6 +908,8 @@ struct tlsdev_ops {
|
||||
void (*tls_dev_del)(struct net_device *netdev,
|
||||
struct tls_context *ctx,
|
||||
enum tls_offload_ctx_dir direction);
|
||||
void (*tls_dev_resync_rx)(struct net_device *netdev,
|
||||
struct sock *sk, u32 seq, u64 rcd_sn);
|
||||
};
|
||||
#endif
|
||||
|
||||
@@ -942,7 +961,8 @@ struct dev_ifalias {
|
||||
* those the driver believes to be appropriate.
|
||||
*
|
||||
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
|
||||
* void *accel_priv, select_queue_fallback_t fallback);
|
||||
* struct net_device *sb_dev,
|
||||
* select_queue_fallback_t fallback);
|
||||
* Called to decide which queue to use when device supports multiple
|
||||
* transmit queues.
|
||||
*
|
||||
@@ -1214,7 +1234,7 @@ struct net_device_ops {
|
||||
netdev_features_t features);
|
||||
u16 (*ndo_select_queue)(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
void *accel_priv,
|
||||
struct net_device *sb_dev,
|
||||
select_queue_fallback_t fallback);
|
||||
void (*ndo_change_rx_flags)(struct net_device *dev,
|
||||
int flags);
|
||||
@@ -1909,7 +1929,8 @@ struct net_device {
|
||||
int watchdog_timeo;
|
||||
|
||||
#ifdef CONFIG_XPS
|
||||
struct xps_dev_maps __rcu *xps_maps;
|
||||
struct xps_dev_maps __rcu *xps_cpus_map;
|
||||
struct xps_dev_maps __rcu *xps_rxqs_map;
|
||||
#endif
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
struct mini_Qdisc __rcu *miniq_egress;
|
||||
@@ -1978,7 +1999,7 @@ struct net_device {
|
||||
#ifdef CONFIG_DCB
|
||||
const struct dcbnl_rtnl_ops *dcbnl_ops;
|
||||
#endif
|
||||
u8 num_tc;
|
||||
s16 num_tc;
|
||||
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
|
||||
u8 prio_tc_map[TC_BITMASK + 1];
|
||||
|
||||
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
 	return dev->num_tc;
 }
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+			      struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+				 struct net_device *sb_dev,
+				 u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+	return max_t(int, -dev->num_tc, 0);
+}
+
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 					 unsigned int index)
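
These helpers, together with num_tc widening from u8 to s16 in the previous hunk, implement "subordinate channels": an upper device stores its channel as a negative num_tc, which netdev_get_sb_channel() recovers, while the lower device maps a slice of its real queues to that channel. A hedged sketch of the binding sequence with hypothetical devices and queue layout:

#include <linux/netdevice.h>

/* Bind 4 of lower_dev's queues, starting at offset 8, to traffic class 0
 * of upper_dev's subordinate channel 1. All numbers are illustrative.
 */
static int foo_bind_sb_queues(struct net_device *lower_dev,
			      struct net_device *upper_dev)
{
	int err;

	err = netdev_set_sb_channel(upper_dev, 1);
	if (err)
		return err;

	err = netdev_bind_sb_channel_queue(lower_dev, upper_dev, 0, 4, 8);
	if (err)
		netdev_unbind_sb_channel(lower_dev, upper_dev);
	return err;
}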
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
-				    void *accel_priv);
+				    struct net_device *sb_dev);
 
 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-						struct sk_buff **head,
-						struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+					       struct list_head *head,
+					       struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
 	return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-					     struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-						   struct sock *sk,
-						   struct sk_buff **head,
-						   struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+					    struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+						  struct sock *sk,
+						  struct list_head *head,
+						  struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	void			(*list_func) (struct list_head *,
+					      struct packet_type *,
+					      struct net_device *);
 	bool			(*id_match)(struct packet_type *ptype,
 					    struct sock *sk);
 	void			*af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
 struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
-	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb);
+	struct sk_buff		*(*gro_receive)(struct list_head *head,
+						struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
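
GRO now holds the packets of a flow on a regular list_head instead of an open-coded sk_buff ** chain, so every gro_receive callback walks it with the standard list iterators and returns a single sk_buff * (NULL to keep holding, non-NULL to flush). A minimal sketch of the new callback shape; foo_hdr and its field are hypothetical:

#include <linux/list.h>
#include <linux/netdevice.h>

struct foo_hdr {			/* hypothetical protocol header */
	__be32 flow_id;
};

static struct sk_buff *foo_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct foo_hdr *fh = skb_gro_header_fast(skb, 0);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		const struct foo_hdr *fh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		fh2 = (const struct foo_hdr *)p->data;
		/* different flow: exclude p from the merge candidates */
		if (fh->flow_id != fh2->flow_id)
			NAPI_GRO_CB(p)->same_flow = 0;
	}
	return NULL;	/* NULL: hold for merging; an skb: flush that flow */
}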
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
 void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+		     struct net_device *sb_dev,
+		     select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+		       struct net_device *sb_dev,
+		       select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
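
dev_pick_tx_zero() and dev_pick_tx_cpu_id() are stock policies whose signatures match ndo_select_queue exactly, so a driver that just wants "transmit queue = current CPU" (or always queue 0) can wire one in directly. An illustrative ops table for the hypothetical foo driver:

#include <linux/netdevice.h>

static const struct net_device_ops foo_netdev_ops = {
	/* ... other ops elided ... */
	.ndo_select_queue	= dev_pick_tx_cpu_id,
};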
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,13 +2825,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	if (PTR_ERR(pp) != -EINPROGRESS)
 		NAPI_GRO_CB(skb)->flush |= flush;
 }
 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
+					       struct sk_buff *pp,
 					       int flush,
 					       struct gro_remcsum *grc)
 {
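
Since gro_receive now returns one sk_buff * rather than a chain, pp can also carry an ERR_PTR: the xfrm path returns -EINPROGRESS when it has taken the packet, and skb_gro_flush_final() skips the flush in that case. A sketch of the usual tail of an encapsulation gro_receive; foo_inner_gro_receive() is hypothetical:

static struct sk_buff *foo_inner_gro_receive(struct list_head *head,
					     struct sk_buff *skb);	/* hypothetical */

static struct sk_buff *foo_gro_receive_tail(struct list_head *head,
					    struct sk_buff *skb, int flush)
{
	struct sk_buff *pp;

	pp = call_gro_receive(foo_inner_gro_receive, head, skb);
	/* pp may be ERR_PTR(-EINPROGRESS); the helper then leaves
	 * NAPI_GRO_CB(skb)->flush untouched
	 */
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}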
@@ -2801,12 +2842,12 @@ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
 	}
 }
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	NAPI_GRO_CB(skb)->flush |= flush;
 }
 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
+					       struct sk_buff *pp,
 					       int flush,
 					       struct gro_remcsum *grc)
 {
@@ -3278,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #ifdef CONFIG_XPS
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+			  u16 index, bool is_rxqs_map);
+
+/**
+ * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ * @j: CPU/Rx queue index
+ * @mask: bitmask of all cpus/rx queues
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+					const unsigned long *mask,
+					unsigned int nr_bits)
+{
+	cpu_max_bits_warn(j, nr_bits);
+	return test_bit(j, mask);
+}
+
+/**
+ * netif_attr_test_online - Test for online CPU/Rx queue
+ * @j: CPU/Rx queue index
+ * @online_mask: bitmask for CPUs/Rx queues that are online
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+					  const unsigned long *online_mask,
+					  unsigned int nr_bits)
+{
+	cpu_max_bits_warn(j, nr_bits);
+
+	if (online_mask)
+		return test_bit(j, online_mask);
+
+	return (j < nr_bits);
+}
+
+/**
+ * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
+ * @n: CPU/Rx queue index
+ * @srcp: the cpumask/Rx queue mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+					       unsigned int nr_bits)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpu_max_bits_warn(n, nr_bits);
+
+	if (srcp)
+		return find_next_bit(srcp, nr_bits, n + 1);
+
+	return n + 1;
+}
+
+/**
+ * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * @n: CPU/Rx queue index
+ * @src1p: the first CPUs/Rx queues mask pointer
+ * @src2p: the second CPUs/Rx queues mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+					  const unsigned long *src2p,
+					  unsigned int nr_bits)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpu_max_bits_warn(n, nr_bits);
+
+	if (src1p && src2p)
+		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+	else if (src1p)
+		return find_next_bit(src1p, nr_bits, n + 1);
+	else if (src2p)
+		return find_next_bit(src2p, nr_bits, n + 1);
+
+	return n + 1;
+}
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
 				      const struct cpumask *mask,
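
These netif_attrmask_* helpers generalize the cpumask iterators so the same XPS code can walk either a CPU mask or an Rx-queue mask kept as raw unsigned longs. A small sketch of the iteration pattern implied by the "returns >= nr_bits when exhausted" contract above; the masks are hypothetical inputs:

/* count the bits set in both masks, e.g. "configured" AND "online" */
static unsigned int foo_count_common(const unsigned long *mask1,
				     const unsigned long *mask2,
				     unsigned int nr_bits)
{
	unsigned int count = 0;
	int j = -1;			/* -1 is the documented start value */

	while ((j = netif_attrmask_next_and(j, mask1, mask2,
					    nr_bits)) < (int)nr_bits)
		count++;
	return count;
}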
@@ -3285,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
 {
 	return 0;
 }
+
+static inline int __netif_set_xps_queue(struct net_device *dev,
+					const unsigned long *mask,
+					u16 index, bool is_rxqs_map)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -3304,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
-					       unsigned int rxq)
+					       unsigned int rxqs)
 {
+	dev->real_num_rx_queues = rxqs;
 	return 0;
 }
 #endif
@@ -3384,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
 int netif_receive_skb(struct sk_buff *skb);
 int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
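
netif_receive_skb_list() is the entry point of the new listified receive path: a driver batches the skbs of one poll on a list and hands them up in a single call, amortizing the per-packet trip through the stack. A minimal sketch of the poll-loop usage; foo_rx_next() is a hypothetical "dequeue one received frame" helper:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct sk_buff *foo_rx_next(struct net_device *dev);	/* hypothetical */

static void foo_rx_poll(struct net_device *dev, int budget)
{
	LIST_HEAD(rx_list);
	struct sk_buff *skb;

	while (budget-- && (skb = foo_rx_next(dev)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		list_add_tail(&skb->list, &rx_list);
	}
	netif_receive_skb_list(&rx_list);	/* one trip up the stack */
}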
@@ -3418,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+		    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
 int dev_change_tx_queue_len(struct net_device *, unsigned long);
 void dev_set_group(struct net_device *, int);
@@ -3435,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
-		     struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+		    enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);

include/linux/netfilter.h
@@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
 	return ret;
 }
 
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+	     struct list_head *head, struct net_device *in, struct net_device *out,
+	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+	struct sk_buff *skb, *next;
+	struct list_head sublist;
+
+	INIT_LIST_HEAD(&sublist);
+	list_for_each_entry_safe(skb, next, head, list) {
+		list_del(&skb->list);
+		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+			list_add_tail(&skb->list, &sublist);
+	}
+	/* Put passed packets back on main list */
+	list_splice(&sublist, head);
+}
+
 /* Call setsockopt() */
 int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
 		  unsigned int len);
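
NF_HOOK_LIST() pushes every skb on the list through the hook and quietly unlinks the ones the hook stole or dropped, so only accepted packets remain for the list-based finish step. A sketch of the caller side, modeled on ip_sublist_rcv() in net/ipv4/ip_input.c from this window; the foo_* finish functions are hypothetical:

#include <linux/netfilter.h>

/* hypothetical per-skb and per-list finish steps */
static int foo_rcv_finish(struct net *net, struct sock *sk,
			  struct sk_buff *skb);
static void foo_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head);

static void foo_sublist_rcv(struct list_head *head, struct net_device *dev,
			    struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, foo_rcv_finish);
	/* only packets the hook accepted are still on @head */
	foo_list_rcv_finish(net, NULL, head);
}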
@@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
 	return okfn(net, sk, skb);
 }
 
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+	     struct list_head *head, struct net_device *in, struct net_device *out,
+	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+	/* nothing to do */
+}
+
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 			  struct sock *sk, struct sk_buff *skb,
 			  struct net_device *indev, struct net_device *outdev,
@@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+			 const struct sk_buff *skb);
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+				       const struct sk_buff *skb)
+{
+	return false;
+}
 #endif
 
 struct nf_conn;
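
nf_ct_get_tuple_skb() lets code outside the conntrack core ask which connection tuple an skb belongs to; the stub above returns false when conntrack is compiled out, and the nf_ct_hook indirection in the next hunk keeps it safe when the module is unloaded. A hedged usage sketch:

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static void foo_inspect(const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;

	if (!nf_ct_get_tuple_skb(&tuple, skb))
		return;	/* no conntrack entry, or conntrack unavailable */
	/* ... inspect tuple.src / tuple.dst here ... */
}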
@@ -398,6 +433,8 @@ enum ip_conntrack_info;
 struct nf_ct_hook {
 	int (*update)(struct net *net, struct sk_buff *skb);
 	void (*destroy)(struct nf_conntrack *);
+	bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+			      const struct sk_buff *);
 };
 extern struct nf_ct_hook __rcu *nf_ct_hook;
 

include/linux/netfilter/nfnetlink.h
@@ -29,6 +29,7 @@ struct nfnetlink_subsystem {
 	__u8 subsys_id;			/* nfnetlink subsystem ID */
 	__u8 cb_count;			/* number of callbacks */
 	const struct nfnl_callback *cb;	/* callback for individual types */
+	struct module *owner;
 	int (*commit)(struct net *net, struct sk_buff *skb);
 	int (*abort)(struct net *net, struct sk_buff *skb);
 	void (*cleanup)(struct net *net);

include/linux/netfilter/nfnetlink_osf.h
@@ -1,16 +1,8 @@
-#include <uapi/linux/netfilter/nf_osf.h>
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _NFOSF_H
 #define _NFOSF_H
 
-/* Initial window size option state machine: multiple of mss, mtu or
- * plain numeric value. Can also be made as plain numeric value which
- * is not a multiple of specified value.
- */
-enum nf_osf_window_size_options {
-	OSF_WSS_PLAIN	= 0,
-	OSF_WSS_MSS,
-	OSF_WSS_MTU,
-	OSF_WSS_MODULO,
-	OSF_WSS_MAX,
-};
+#include <uapi/linux/netfilter/nfnetlink_osf.h>
 
 enum osf_fmatch_states {
 	/* Packet does not match the fingerprint */
@@ -21,6 +13,8 @@ enum osf_fmatch_states {
 	FMATCH_OPT_WRONG,
 };
 
+extern struct list_head nf_osf_fingers[2];
+
 struct nf_osf_finger {
 	struct rcu_head rcu_head;
 	struct list_head finger_entry;
@@ -31,3 +25,8 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
 		  int hooknum, struct net_device *in, struct net_device *out,
 		  const struct nf_osf_info *info, struct net *net,
 		  const struct list_head *nf_osf_fingers);
+
+const char *nf_osf_find(const struct sk_buff *skb,
+			const struct list_head *nf_osf_fingers);
+
+#endif /* _NFOSF_H */

include/linux/netfilter_bridge.h
@@ -5,17 +5,6 @@
 #include <uapi/linux/netfilter_bridge.h>
 #include <linux/skbuff.h>
 
-enum nf_br_hook_priorities {
-	NF_BR_PRI_FIRST = INT_MIN,
-	NF_BR_PRI_NAT_DST_BRIDGED = -300,
-	NF_BR_PRI_FILTER_BRIDGED = -200,
-	NF_BR_PRI_BRNF = 0,
-	NF_BR_PRI_NAT_DST_OTHER = 100,
-	NF_BR_PRI_FILTER_OTHER = 200,
-	NF_BR_PRI_NAT_SRC = 300,
-	NF_BR_PRI_LAST = INT_MAX,
-};
-
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 
 int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);

include/linux/netfilter_ipv4.h
@@ -23,9 +23,6 @@ struct nf_queue_entry;
 #ifdef CONFIG_INET
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 		       unsigned int dataoff, u_int8_t protocol);
-__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
-			       unsigned int dataoff, unsigned int len,
-			       u_int8_t protocol);
 int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
 		bool strict);
 int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
@@ -35,14 +32,6 @@ static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 {
 	return 0;
 }
-static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
-					     unsigned int hook,
-					     unsigned int dataoff,
-					     unsigned int len,
-					     u_int8_t protocol)
-{
-	return 0;
-}
 static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
 			      struct flowi *fl, bool strict)
 {

include/linux/netfilter_ipv6.h
@@ -30,11 +30,6 @@ struct nf_ipv6_ops {
 	void (*route_input)(struct sk_buff *skb);
 	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
 			int (*output)(struct net *, struct sock *, struct sk_buff *));
-	__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
-			    unsigned int dataoff, u_int8_t protocol);
-	__sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
-				    unsigned int dataoff, unsigned int len,
-				    u_int8_t protocol);
 	int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
 		     bool strict);
 	int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);