Merge branch 'linus' into x86/urgent, to pick up dependent changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
lib/Kconfig (31 lines changed)
@@ -18,6 +18,23 @@ config RAID6_PQ_BENCHMARK
 	  Benchmark all available RAID6 PQ functions on init and choose the
 	  fastest one.
 
+config PACKING
+	bool "Generic bitfield packing and unpacking"
+	default n
+	help
+	  This option provides the packing() helper function, which permits
+	  converting bitfields between a CPU-usable representation and a
+	  memory representation that can have any combination of these quirks:
+	    - Is little endian (bytes are reversed within a 32-bit group)
+	    - The least-significant 32-bit word comes first (within a 64-bit
+	      group)
+	    - The most significant bit of a byte is at its right (bit 0 of a
+	      register description is numerically 2^7).
+	  Drivers may use these helpers to match the bit indices as described
+	  in the data sheets of the peripherals they are in control of.
+
+	  When in doubt, say N.
+
 config BITREVERSE
 	tristate
 
@@ -29,9 +46,6 @@ config HAVE_ARCH_BITREVERSE
 	  This option enables the use of hardware bit-reversal instructions on
 	  architectures which support such operations.
 
-config RATIONAL
-	bool
-
 config GENERIC_STRNCPY_FROM_USER
 	bool
 
@@ -44,6 +58,8 @@ config GENERIC_NET_UTILS
 config GENERIC_FIND_FIRST_BIT
 	bool
 
+source "lib/math/Kconfig"
+
 config NO_GENERIC_PCI_IOPORT_MAP
 	bool
 
@@ -514,12 +530,6 @@ config LRU_CACHE
 config CLZ_TAB
 	bool
 
-config CORDIC
-	tristate "CORDIC algorithm"
-	help
-	  This option provides an implementation of the CORDIC algorithm;
-	  calculations are in fixed point. Module will be called cordic.
-
 config DDR
 	bool "JEDEC DDR data"
 	help
@@ -611,9 +621,6 @@ config SBITMAP
 config PARMAN
 	tristate "parman" if COMPILE_TEST
 
-config PRIME_NUMBERS
-	tristate
-
 config STRING_SELFTEST
 	tristate "Test string functions"
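
As a concrete illustration of the PACKING help text above, a driver might call the helper like this. This is a minimal sketch, not code from the merge; the field boundaries and quirk combination are invented for the example:

#include <linux/packing.h>

/* Sketch: write a CPU-native value into bits 47:32 of a hardware
 * buffer whose (assumed) data sheet uses LSB-on-the-right bit
 * numbering and little-endian byte order within 32-bit groups.
 */
static int pack_example(void *hw_buf, size_t buflen)
{
	u64 speed = 1000;	/* CPU-usable representation */

	return packing(hw_buf, &speed, 47, 32, buflen, PACK,
		       QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN);
}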

lib/Kconfig.debug
@@ -219,6 +219,14 @@ config DEBUG_INFO_DWARF4
 	  But it significantly improves the success of resolving
 	  variables in gdb on optimized code.
 
+config DEBUG_INFO_BTF
+	bool "Generate BTF typeinfo"
+	depends on DEBUG_INFO
+	help
+	  Generate deduplicated BTF type information from DWARF debug info.
+	  Turning this on expects presence of pahole tool, which will convert
+	  DWARF type info into equivalent deduplicated BTF type info.
+
 config GDB_SCRIPTS
 	bool "Provide GDB scripts for kernel debugging"
 	depends on DEBUG_INFO
@@ -310,6 +318,20 @@ config HEADERS_CHECK
 	  exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
 	  your build tree), to make sure they're suitable.
 
+config OPTIMIZE_INLINING
+	bool "Allow compiler to uninline functions marked 'inline'"
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  enabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc 4.x and above to make the
+	  decision will become the default in the future. Until then this option
+	  is there to test gcc for this.
+
+	  If unsure, say N.
+
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
 	help
@@ -438,6 +460,15 @@ config DEBUG_KERNEL
 	  Say Y here if you are developing drivers or trying to debug and
 	  identify kernel problems.
 
+config DEBUG_MISC
+	bool "Miscellaneous debug code"
+	default DEBUG_KERNEL
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here if you need to enable miscellaneous debug code that should
+	  be under a more specific debug option but isn't.
+
+
 menu "Memory Debugging"
 
 source "mm/Kconfig.debug"
@@ -1350,7 +1381,7 @@ config DEBUG_LIST
 
 	  If unsure, say N.
 
-config DEBUG_PI_LIST
+config DEBUG_PLIST
 	bool "Debug priority linked list manipulation"
 	depends on DEBUG_KERNEL
 	help
@@ -1769,6 +1800,9 @@ config TEST_HEXDUMP
 config TEST_STRING_HELPERS
 	tristate "Test functions located in the string_helpers module at runtime"
 
+config TEST_STRSCPY
+	tristate "Test strscpy*() family of functions at runtime"
+
 config TEST_KSTRTOX
 	tristate "Test kstrto*() family of functions at runtime"
 
@@ -1927,7 +1961,6 @@ config TEST_STATIC_KEYS
 config TEST_KMOD
 	tristate "kmod stress tester"
 	depends on m
-	depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
 	depends on NETDEVICES && NET_CORE && INET # for TUN
 	depends on BLOCK
 	select TEST_LKM

lib/Makefile (17 lines changed)
@@ -30,7 +30,7 @@ endif
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o timerqueue.o xarray.o \
-	 idr.o int_sqrt.o extable.o \
+	 idr.o extable.o \
 	 sha1.o chacha.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
@@ -44,11 +44,11 @@ lib-$(CONFIG_SMP) += cpumask.o
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
-obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
+obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
-	 gcd.o lcm.o list_sort.o uuid.o iov_iter.o clz_ctz.o \
+	 list_sort.o uuid.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o rhashtable.o reciprocal_div.o \
+	 percpu-refcount.o rhashtable.o \
 	 once.o refcount.o usercopy.o errseq.o bucket_locks.o \
 	 generic-radix-tree.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
@@ -81,6 +81,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
 obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
@@ -101,6 +102,8 @@ endif
 obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
 CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
 
+obj-y += math/
+
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -119,7 +122,7 @@ obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
 obj-$(CONFIG_BITREVERSE) += bitrev.o
-obj-$(CONFIG_RATIONAL)	+= rational.o
+obj-$(CONFIG_PACKING)	+= packing.o
 obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
 obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -193,8 +196,6 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
-obj-$(CONFIG_CORDIC) += cordic.o
-
 obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 
 obj-$(CONFIG_GLOB) += glob.o
@@ -236,8 +237,6 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
 
-obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
-
 hostprogs-y	:= gen_crc32table
 hostprogs-y	+= gen_crc64table
 clean-files	:= crc32table.h

lib/asn1_decoder.c

@@ -385,6 +385,8 @@ next_op:
 	case ASN1_OP_END_SET_ACT:
 		if (unlikely(!(flags & FLAG_MATCHED)))
 			goto tag_mismatch;
+		/* fall through */
+
 	case ASN1_OP_END_SEQ:
 	case ASN1_OP_END_SET_OF:
 	case ASN1_OP_END_SEQ_OF:
@@ -450,6 +452,8 @@ next_op:
 			pc += asn1_op_lengths[op];
 			goto next_op;
 		}
+		/* fall through */
+
 	case ASN1_OP_ACT:
 		ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
 		if (ret < 0)
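
These hunks add explicit fall-through annotations so that intentional case fall-through does not trip -Wimplicit-fallthrough. A minimal sketch of the pattern; the enum and helper functions are invented for illustration:

#include <stdio.h>

/* Hypothetical ops and helpers, invented for this sketch. */
enum example_op { OP_PREPARE, OP_RUN };

static void prepare(void) { printf("prepare\n"); }
static void run(void)     { printf("run\n"); }

static void handle(enum example_op op)
{
	switch (op) {
	case OP_PREPARE:
		prepare();		/* extra work for OP_PREPARE ... */
		/* fall through */	/* ... then share OP_RUN's path */
	case OP_RUN:
		run();
		break;
	}
}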

lib/bitmap.c (280 lines changed)
@@ -20,6 +20,8 @@
 
 #include <asm/page.h>
 
+#include "kstrtox.h"
+
 /**
  * DOC: bitmap introduction
  *
@@ -477,12 +479,128 @@ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
 }
 EXPORT_SYMBOL(bitmap_print_to_pagebuf);
 
+/*
+ * Region 9-38:4/10 describes the following bitmap structure:
+ * 0	   9  12    18			38
+ * .........****......****......****......
+ *	    ^  ^     ^			 ^
+ *      start  off   group_len	       end
+ */
+struct region {
+	unsigned int start;
+	unsigned int off;
+	unsigned int group_len;
+	unsigned int end;
+};
+
+static int bitmap_set_region(const struct region *r,
+				unsigned long *bitmap, int nbits)
+{
+	unsigned int start;
+
+	if (r->end >= nbits)
+		return -ERANGE;
+
+	for (start = r->start; start <= r->end; start += r->group_len)
+		bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
+
+	return 0;
+}
+
+static int bitmap_check_region(const struct region *r)
+{
+	if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
+		return -EINVAL;
+
+	return 0;
+}
+
+static const char *bitmap_getnum(const char *str, unsigned int *num)
+{
+	unsigned long long n;
+	unsigned int len;
+
+	len = _parse_integer(str, 10, &n);
+	if (!len)
+		return ERR_PTR(-EINVAL);
+	if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
+		return ERR_PTR(-EOVERFLOW);
+
+	*num = n;
+	return str + len;
+}
+
+static inline bool end_of_str(char c)
+{
+	return c == '\0' || c == '\n';
+}
+
+static inline bool __end_of_region(char c)
+{
+	return isspace(c) || c == ',';
+}
+
+static inline bool end_of_region(char c)
+{
+	return __end_of_region(c) || end_of_str(c);
+}
+
+/*
+ * The format allows commas and whitespaces at the beginning
+ * of the region.
+ */
+static const char *bitmap_find_region(const char *str)
+{
+	while (__end_of_region(*str))
+		str++;
+
+	return end_of_str(*str) ? NULL : str;
+}
+
+static const char *bitmap_parse_region(const char *str, struct region *r)
+{
+	str = bitmap_getnum(str, &r->start);
+	if (IS_ERR(str))
+		return str;
+
+	if (end_of_region(*str))
+		goto no_end;
+
+	if (*str != '-')
+		return ERR_PTR(-EINVAL);
+
+	str = bitmap_getnum(str + 1, &r->end);
+	if (IS_ERR(str))
+		return str;
+
+	if (end_of_region(*str))
+		goto no_pattern;
+
+	if (*str != ':')
+		return ERR_PTR(-EINVAL);
+
+	str = bitmap_getnum(str + 1, &r->off);
+	if (IS_ERR(str))
+		return str;
+
+	if (*str != '/')
+		return ERR_PTR(-EINVAL);
+
+	return bitmap_getnum(str + 1, &r->group_len);
+
+no_end:
+	r->end = r->start;
+no_pattern:
+	r->off = r->end + 1;
+	r->group_len = r->end + 1;
+
+	return end_of_str(*str) ? NULL : str;
+}
+
 /**
- * __bitmap_parselist - convert list format ASCII string to bitmap
- * @buf: read nul-terminated user string from this buffer
- * @buflen: buffer size in bytes.  If string is smaller than this
- *    then it must be terminated with a \0.
- * @is_user: location of buffer, 0 indicates kernel space
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read user string from this buffer; must be terminated
+ *    with a \0 or \n.
 * @maskp: write resulting mask here
 * @nmaskbits: number of bits in mask to be written
 *
@@ -498,127 +616,38 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
 *
 * Returns: 0 on success, -errno on invalid input strings. Error values:
 *
- *   - ``-EINVAL``: second number in range smaller than first
+ *   - ``-EINVAL``: wrong region format
 *   - ``-EINVAL``: invalid character in string
 *   - ``-ERANGE``: bit number specified too large for mask
+ *   - ``-EOVERFLOW``: integer overflow in the input parameters
 */
-static int __bitmap_parselist(const char *buf, unsigned int buflen,
-		int is_user, unsigned long *maskp,
-		int nmaskbits)
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
 {
-	unsigned int a, b, old_a, old_b;
-	unsigned int group_size, used_size, off;
-	int c, old_c, totaldigits, ndigits;
-	const char __user __force *ubuf = (const char __user __force *)buf;
-	int at_start, in_range, in_partial_range;
+	struct region r;
+	long ret;
 
-	totaldigits = c = 0;
-	old_a = old_b = 0;
-	group_size = used_size = 0;
 	bitmap_zero(maskp, nmaskbits);
-	do {
-		at_start = 1;
-		in_range = 0;
-		in_partial_range = 0;
-		a = b = 0;
-		ndigits = totaldigits;
 
-		/* Get the next cpu# or a range of cpu#'s */
-		while (buflen) {
-			old_c = c;
-			if (is_user) {
-				if (__get_user(c, ubuf++))
-					return -EFAULT;
-			} else
-				c = *buf++;
-			buflen--;
-			if (isspace(c))
-				continue;
+	while (buf) {
+		buf = bitmap_find_region(buf);
+		if (buf == NULL)
+			return 0;
 
-			/* A '\0' or a ',' signal the end of a cpu# or range */
-			if (c == '\0' || c == ',')
-				break;
-			/*
-			 * whitespaces between digits are not allowed,
-			 * but it's ok if whitespaces are on head or tail.
-			 * when old_c is whitespace,
-			 * if totaldigits == ndigits, whitespace is on head.
-			 * if whitespace is on tail, it should not run here.
-			 * as c was ',' or '\0',
-			 * the last code line has broken the current loop.
-			 */
-			if ((totaldigits != ndigits) && isspace(old_c))
-				return -EINVAL;
+		buf = bitmap_parse_region(buf, &r);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
 
-			if (c == '/') {
-				used_size = a;
-				at_start = 1;
-				in_range = 0;
-				a = b = 0;
-				continue;
-			}
+		ret = bitmap_check_region(&r);
+		if (ret)
+			return ret;
 
-			if (c == ':') {
-				old_a = a;
-				old_b = b;
-				at_start = 1;
-				in_range = 0;
-				in_partial_range = 1;
-				a = b = 0;
-				continue;
-			}
+		ret = bitmap_set_region(&r, maskp, nmaskbits);
+		if (ret)
+			return ret;
+	}
 
-			if (c == '-') {
-				if (at_start || in_range)
-					return -EINVAL;
-				b = 0;
-				in_range = 1;
-				at_start = 1;
-				continue;
-			}
-
-			if (!isdigit(c))
-				return -EINVAL;
-
-			b = b * 10 + (c - '0');
-			if (!in_range)
-				a = b;
-			at_start = 0;
-			totaldigits++;
-		}
-		if (ndigits == totaldigits)
-			continue;
-		if (in_partial_range) {
-			group_size = a;
-			a = old_a;
-			b = old_b;
-			old_a = old_b = 0;
-		} else {
-			used_size = group_size = b - a + 1;
-		}
-		/* if no digit is after '-', it's wrong */
-		if (at_start && in_range)
-			return -EINVAL;
-		if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
-			return -EINVAL;
-		if (b >= nmaskbits)
-			return -ERANGE;
-		while (a <= b) {
-			off = min(b - a + 1, used_size);
-			bitmap_set(maskp, a, off);
-			a += group_size;
-		}
-	} while (buflen && c == ',');
-
 	return 0;
 }
-
-int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
-{
-	char *nl = strchrnul(bp, '\n');
-	int len = nl - bp;
-
-	return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
-}
 EXPORT_SYMBOL(bitmap_parselist);
 
@@ -632,23 +661,27 @@ EXPORT_SYMBOL(bitmap_parselist);
 * @nmaskbits: size of bitmap, in bits.
 *
 * Wrapper for bitmap_parselist(), providing it with user buffer.
- *
- * We cannot have this as an inline function in bitmap.h because it needs
- * linux/uaccess.h to get the access_ok() declaration and this causes
- * cyclic dependencies.
 */
 int bitmap_parselist_user(const char __user *ubuf,
			unsigned int ulen, unsigned long *maskp,
			int nmaskbits)
 {
-	if (!access_ok(ubuf, ulen))
-		return -EFAULT;
-	return __bitmap_parselist((const char __force *)ubuf,
-					ulen, 1, maskp, nmaskbits);
+	char *buf;
+	int ret;
+
+	buf = memdup_user_nul(ubuf, ulen);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	ret = bitmap_parselist(buf, maskp, nmaskbits);
+
+	kfree(buf);
+	return ret;
 }
 EXPORT_SYMBOL(bitmap_parselist_user);
 
+#ifdef CONFIG_NUMA
 /**
 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
 * @buf: pointer to a bitmap
@@ -757,7 +790,6 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
			set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
 	}
 }
-EXPORT_SYMBOL(bitmap_remap);
 
 /**
 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
@@ -795,7 +827,6 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
 	else
		return bitmap_ord_to_pos(new, n % w, bits);
 }
-EXPORT_SYMBOL(bitmap_bitremap);
 
 /**
 * bitmap_onto - translate one bitmap relative to another
@@ -930,7 +961,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
		m++;
 	}
 }
-EXPORT_SYMBOL(bitmap_onto);
 
 /**
 * bitmap_fold - fold larger bitmap into smaller, modulo specified size
@@ -955,7 +985,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
 	for_each_set_bit(oldbit, orig, nbits)
		set_bit(oldbit % sz, dst);
 }
-EXPORT_SYMBOL(bitmap_fold);
+#endif /* CONFIG_NUMA */
 
 /*
 * Common code for bitmap_*_region() routines.
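
To make the region grammar above concrete, here is a sketch of parsing one of these strings in kernel code. The particular string is invented for the example; per the diagram, "off" bits out of every "group_len" are set across the start-end range:

#include <linux/bitmap.h>

/* Sketch: parse "0-15:2/4" into a 32-bit map.  This sets 2 bits
 * ("off") out of every 4 ("group_len") across positions 0-15,
 * i.e. bits 0, 1, 4, 5, 8, 9, 12 and 13.
 */
static int parselist_demo(void)
{
	DECLARE_BITMAP(mask, 32);

	return bitmap_parselist("0-15:2/4", mask, 32);
}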

lib/cmdline.c

@@ -135,18 +135,23 @@ unsigned long long memparse(const char *ptr, char **retptr)
 	case 'E':
 	case 'e':
 		ret <<= 10;
+		/* fall through */
 	case 'P':
 	case 'p':
 		ret <<= 10;
+		/* fall through */
 	case 'T':
 	case 't':
 		ret <<= 10;
+		/* fall through */
 	case 'G':
 	case 'g':
 		ret <<= 10;
+		/* fall through */
 	case 'M':
 	case 'm':
 		ret <<= 10;
+		/* fall through */
 	case 'K':
 	case 'k':
 		ret <<= 10;
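
The deliberate fall-through is what makes the suffixes cumulative: each case adds one <<10 shift, so 'M' passes through 'K' and scales by 2^20 in total. A quick illustration; the expected values are computed from that logic, not from running the patch:

#include <linux/kernel.h>	/* memparse() */

static void memparse_demo(void)
{
	unsigned long long k = memparse("16K", NULL); /* 16 << 10 = 16384 */
	unsigned long long m = memparse("16M", NULL); /* 16 << 20 = 16777216 */
	unsigned long long g = memparse("1G", NULL);  /* 1 << 30 = 1073741824 */

	WARN_ON(k != 16384 || m != 16777216 || g != 1073741824);
}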

lib/crc-t10dif.c

@@ -69,7 +69,6 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 
 	rcu_read_lock();
 	desc.shash.tfm = rcu_dereference(crct10dif_tfm);
-	desc.shash.flags = 0;
 	*(__u16 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, buffer, len);

lib/digsig.c

@@ -240,7 +240,6 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
 		goto err;
 
 	desc->tfm = shash;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	crypto_shash_init(desc);
 	crypto_shash_update(desc, data, datalen);

lib/dynamic_debug.c

@@ -37,6 +37,8 @@
 #include <linux/device.h>
 #include <linux/netdevice.h>
 
+#include <rdma/ib_verbs.h>
+
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
@@ -636,6 +638,41 @@ EXPORT_SYMBOL(__dynamic_netdev_dbg);
 
 #endif
 
+#if IS_ENABLED(CONFIG_INFINIBAND)
+
+void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
+			 const struct ib_device *ibdev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (ibdev && ibdev->dev.parent) {
+		char buf[PREFIX_SIZE];
+
+		dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
+				"%s%s %s %s: %pV",
+				dynamic_emit_prefix(descriptor, buf),
+				dev_driver_string(ibdev->dev.parent),
+				dev_name(ibdev->dev.parent),
+				dev_name(&ibdev->dev),
+				&vaf);
+	} else if (ibdev) {
+		printk(KERN_DEBUG "%s: %pV", dev_name(&ibdev->dev), &vaf);
+	} else {
+		printk(KERN_DEBUG "(NULL ib_device): %pV", &vaf);
+	}
+
+	va_end(args);
+}
+EXPORT_SYMBOL(__dynamic_ibdev_dbg);
+
+#endif
+
 #define DDEBUG_STRING_SIZE 1024
 static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE];

lib/error-inject.c

@@ -189,7 +189,7 @@ static int ei_seq_show(struct seq_file *m, void *v)
 {
 	struct ei_entry *ent = list_entry(v, struct ei_entry, list);
 
-	seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
+	seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
		   error_type_string(ent->etype));
 	return 0;
 }

lib/iov_iter.c

@@ -1293,7 +1293,9 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 		len = maxpages * PAGE_SIZE;
 		addr &= ~(PAGE_SIZE - 1);
 		n = DIV_ROUND_UP(len, PAGE_SIZE);
-		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
+		res = get_user_pages_fast(addr, n,
+				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
+				pages);
 		if (unlikely(res < 0))
			return res;
 		return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1374,7 +1376,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 		p = get_pages_array(n);
 		if (!p)
			return -ENOMEM;
-		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
+		res = get_user_pages_fast(addr, n,
+				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
 		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
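
The third argument of get_user_pages_fast() changed in this series from a 0/1 "write" int to a gup_flags bitmask, so callers now pass FOLL_WRITE explicitly. A hedged sketch of a caller updated for the new signature; the address and page count are made up:

#include <linux/mm.h>

/* Sketch: pin one user page for writing under the new calling
 * convention; the gup_flags mask replaces the old "int write".
 */
static int pin_for_write(unsigned long uaddr, struct page **page)
{
	return get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, page);
}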

lib/kobject.c

@@ -18,7 +18,7 @@
 #include <linux/random.h>
 
 /**
- * kobject_namespace - return @kobj's namespace tag
+ * kobject_namespace() - Return @kobj's namespace tag.
 * @kobj: kobject in question
 *
 * Returns namespace tag of @kobj if its parent has namespace ops enabled
@@ -36,7 +36,7 @@ const void *kobject_namespace(struct kobject *kobj)
 }
 
 /**
- * kobject_get_ownership - get sysfs ownership data for @kobj
+ * kobject_get_ownership() - Get sysfs ownership data for @kobj.
 * @kobj: kobject in question
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
@@ -82,6 +82,7 @@ static int populate_dir(struct kobject *kobj)
 
 static int create_dir(struct kobject *kobj)
 {
+	const struct kobj_type *ktype = get_ktype(kobj);
 	const struct kobj_ns_type_operations *ops;
 	int error;
 
@@ -95,6 +96,14 @@ static int create_dir(struct kobject *kobj)
		return error;
 	}
 
+	if (ktype) {
+		error = sysfs_create_groups(kobj, ktype->default_groups);
+		if (error) {
+			sysfs_remove_dir(kobj);
+			return error;
+		}
+	}
+
 	/*
	 * @kobj->sd may be deleted by an ancestor going away.  Hold an
	 * extra reference so that it stays until @kobj is gone.
@@ -153,12 +162,11 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
 }
 
 /**
- * kobject_get_path - generate and return the path associated with a given kobj and kset pair.
- *
+ * kobject_get_path() - Allocate memory and fill in the path for @kobj.
 * @kobj:	kobject in question, with which to build the path
 * @gfp_mask:	the allocation type used to allocate the path
 *
- * The result must be freed by the caller with kfree().
+ * Return: The newly allocated memory, caller must free with kfree().
 */
 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
@@ -265,7 +273,7 @@ static int kobject_add_internal(struct kobject *kobj)
 }
 
 /**
- * kobject_set_name_vargs - Set the name of an kobject
+ * kobject_set_name_vargs() - Set the name of a kobject.
 * @kobj: struct kobject to set the name of
 * @fmt: format string used to build the name
 * @vargs: vargs to format the string.
@@ -305,7 +313,7 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 }
 
 /**
- * kobject_set_name - Set the name of a kobject
+ * kobject_set_name() - Set the name of a kobject.
 * @kobj: struct kobject to set the name of
 * @fmt: format string used to build the name
 *
@@ -327,7 +335,7 @@ int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
 EXPORT_SYMBOL(kobject_set_name);
 
 /**
- * kobject_init - initialize a kobject structure
+ * kobject_init() - Initialize a kobject structure.
 * @kobj: pointer to the kobject to initialize
 * @ktype: pointer to the ktype for this kobject.
 *
@@ -383,7 +391,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
 }
 
 /**
- * kobject_add - the main kobject add function
+ * kobject_add() - The main kobject add function.
 * @kobj: the kobject to add
 * @parent: pointer to the parent of the kobject.
 * @fmt: format to name the kobject with.
@@ -397,15 +405,23 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
 * is assigned to the kobject, then the kobject will be located in the
 * root of the sysfs tree.
 *
- * If this function returns an error, kobject_put() must be called to
- * properly clean up the memory associated with the object.
- * Under no instance should the kobject that is passed to this function
- * be directly freed with a call to kfree(), that can leak memory.
- *
 * Note, no "add" uevent will be created with this call, the caller should set
 * up all of the necessary sysfs files for the object and then call
 * kobject_uevent() with the UEVENT_ADD parameter to ensure that
 * userspace is properly notified of this kobject's creation.
+ *
+ * Return: If this function returns an error, kobject_put() must be
+ *         called to properly clean up the memory associated with the
+ *         object.  Under no instance should the kobject that is passed
+ *         to this function be directly freed with a call to kfree(),
+ *         that can leak memory.
+ *
+ *         If this function returns success, kobject_put() must also be called
+ *         in order to properly clean up the memory associated with the object.
+ *
+ *         In short, once this function is called, kobject_put() MUST be called
+ *         when the use of the object is finished in order to properly free
+ *         everything.
 */
 int kobject_add(struct kobject *kobj, struct kobject *parent,
		const char *fmt, ...)
@@ -431,15 +447,19 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
 EXPORT_SYMBOL(kobject_add);
 
 /**
- * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
+ * kobject_init_and_add() - Initialize a kobject structure and add it to
+ *                          the kobject hierarchy.
 * @kobj: pointer to the kobject to initialize
 * @ktype: pointer to the ktype for this kobject.
 * @parent: pointer to the parent of this kobject.
 * @fmt: the name of the kobject.
 *
- * This function combines the call to kobject_init() and
- * kobject_add().  The same type of error handling after a call to
- * kobject_add() and kobject lifetime rules are the same here.
+ * This function combines the call to kobject_init() and kobject_add().
+ *
+ * If this function returns an error, kobject_put() must be called to
+ * properly clean up the memory associated with the object.  This is the
+ * same type of error handling after a call to kobject_add() and kobject
+ * lifetime rules are the same here.
 */
 int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
			 struct kobject *parent, const char *fmt, ...)
@@ -458,7 +478,7 @@ int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
 EXPORT_SYMBOL_GPL(kobject_init_and_add);
 
 /**
- * kobject_rename - change the name of an object
+ * kobject_rename() - Change the name of an object.
 * @kobj: object in question.
 * @new_name: object's new name
 *
@@ -525,7 +545,7 @@ out:
 EXPORT_SYMBOL_GPL(kobject_rename);
 
 /**
- * kobject_move - move object to another parent
+ * kobject_move() - Move object to another parent.
 * @kobj: object in question.
 * @new_parent: object's new parent (can be NULL)
 */
@@ -578,17 +598,26 @@ out:
 EXPORT_SYMBOL_GPL(kobject_move);
 
 /**
- * kobject_del - unlink kobject from hierarchy.
+ * kobject_del() - Unlink kobject from hierarchy.
 * @kobj: object.
 *
 * This is the function that should be called to delete an object
 * successfully added via kobject_add().
 */
 void kobject_del(struct kobject *kobj)
 {
 	struct kernfs_node *sd;
+	const struct kobj_type *ktype;
 
 	if (!kobj)
		return;
 
 	sd = kobj->sd;
+	ktype = get_ktype(kobj);
+
+	if (ktype)
+		sysfs_remove_groups(kobj, ktype->default_groups);
+
 	sysfs_remove_dir(kobj);
 	sysfs_put(sd);
@@ -600,7 +629,7 @@ void kobject_del(struct kobject *kobj)
 EXPORT_SYMBOL(kobject_del);
 
 /**
- * kobject_get - increment refcount for object.
+ * kobject_get() - Increment refcount for object.
 * @kobj: object.
 */
 struct kobject *kobject_get(struct kobject *kobj)
@@ -693,7 +722,7 @@ static void kobject_release(struct kref *kref)
 }
 
 /**
- * kobject_put - decrement refcount for object.
+ * kobject_put() - Decrement refcount for object.
 * @kobj: object.
 *
 * Decrement the refcount, and if 0, call kobject_cleanup().
@@ -722,7 +751,7 @@ static struct kobj_type dynamic_kobj_ktype = {
 };
 
 /**
- * kobject_create - create a struct kobject dynamically
+ * kobject_create() - Create a struct kobject dynamically.
 *
 * This function creates a kobject structure dynamically and sets it up
 * to be a "dynamic" kobject with a default release function set up.
@@ -745,8 +774,8 @@ struct kobject *kobject_create(void)
 }
 
 /**
- * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
- *
+ * kobject_create_and_add() - Create a struct kobject dynamically and
+ *                            register it with sysfs.
 * @name: the name for the kobject
 * @parent: the parent kobject of this kobject, if any.
 *
@@ -777,7 +806,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
 EXPORT_SYMBOL_GPL(kobject_create_and_add);
 
 /**
- * kset_init - initialize a kset for use
+ * kset_init() - Initialize a kset for use.
 * @k: kset
 */
 void kset_init(struct kset *k)
@@ -819,7 +848,7 @@ const struct sysfs_ops kobj_sysfs_ops = {
 EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
 
 /**
- * kset_register - initialize and add a kset.
+ * kset_register() - Initialize and add a kset.
 * @k: kset.
 */
 int kset_register(struct kset *k)
@@ -839,7 +868,7 @@ int kset_register(struct kset *k)
 EXPORT_SYMBOL(kset_register);
 
 /**
- * kset_unregister - remove a kset.
+ * kset_unregister() - Remove a kset.
 * @k: kset.
 */
 void kset_unregister(struct kset *k)
@@ -852,7 +881,7 @@ void kset_unregister(struct kset *k)
 EXPORT_SYMBOL(kset_unregister);
 
 /**
- * kset_find_obj - search for object in kset.
+ * kset_find_obj() - Search for object in kset.
 * @kset: kset we're looking in.
 * @name: object's name.
 *
@@ -900,7 +929,7 @@ static struct kobj_type kset_ktype = {
 };
 
 /**
- * kset_create - create a struct kset dynamically
+ * kset_create() - Create a struct kset dynamically.
 *
 * @name: the name for the kset
 * @uevent_ops: a struct kset_uevent_ops for the kset
@@ -944,7 +973,7 @@ static struct kset *kset_create(const char *name,
 }
 
 /**
- * kset_create_and_add - create a struct kset dynamically and add it to sysfs
+ * kset_create_and_add() - Create a struct kset dynamically and add it to sysfs.
 *
 * @name: the name for the kset
 * @uevent_ops: a struct kset_uevent_ops for the kset
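
With create_dir() and kobject_del() now honoring ktype->default_groups, a ktype can declare its sysfs attributes and have them created and removed automatically. A minimal sketch; the names are invented, and the .release callback every real ktype needs is omitted for brevity:

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute, for illustration. */
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "hello\n");
}

static struct kobj_attribute foo_attr = __ATTR_RO(foo);

static struct attribute *demo_attrs[] = {
	&foo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);		/* defines demo_groups */

static struct kobj_type demo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	/* create_dir()/kobject_del() now add and remove these for us */
	.default_groups	= demo_groups,
};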

lib/kobject_uevent.c

@@ -466,6 +466,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	int i = 0;
 	int retval = 0;
 
+	/*
+	 * Mark "remove" event done regardless of result, for some subsystems
+	 * do not want to re-trigger "remove" event via automatic cleanup.
+	 */
+	if (action == KOBJ_REMOVE)
+		kobj->state_remove_uevent_sent = 1;
+
 	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);
 
@@ -567,10 +574,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		kobj->state_add_uevent_sent = 1;
 		break;
 
-	case KOBJ_REMOVE:
-		kobj->state_remove_uevent_sent = 1;
-		break;
-
 	case KOBJ_UNBIND:
 		zap_modalias_env(env);
 		break;

lib/libcrc32c.c

@@ -47,7 +47,6 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	int err;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 	*ctx = crc;
 
 	err = crypto_shash_update(shash, address, length);

lib/list_sort.c (238 lines changed)
@@ -7,33 +7,41 @@
 #include <linux/list_sort.h>
 #include <linux/list.h>
 
-#define MAX_LIST_LENGTH_BITS 20
+typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
+		struct list_head const *, struct list_head const *);
 
 /*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
-static struct list_head *merge(void *priv,
-				int (*cmp)(void *priv, struct list_head *a,
-					struct list_head *b),
+__attribute__((nonnull(2,3,4)))
+static struct list_head *merge(void *priv, cmp_func cmp,
				struct list_head *a, struct list_head *b)
 {
-	struct list_head head, *tail = &head;
+	struct list_head *head, **tail = &head;
 
-	while (a && b) {
+	for (;;) {
 		/* if equal, take 'a' -- important for sort stability */
-		if ((*cmp)(priv, a, b) <= 0) {
-			tail->next = a;
+		if (cmp(priv, a, b) <= 0) {
+			*tail = a;
+			tail = &a->next;
			a = a->next;
+			if (!a) {
+				*tail = b;
+				break;
+			}
		} else {
-			tail->next = b;
+			*tail = b;
+			tail = &b->next;
			b = b->next;
+			if (!b) {
+				*tail = a;
+				break;
+			}
		}
-		tail = tail->next;
 	}
-	tail->next = a?:b;
-	return head.next;
+	return head;
 }
 
 /*
@@ -43,44 +51,52 @@ static struct list_head *merge(void *priv,
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
-static void merge_and_restore_back_links(void *priv,
-				int (*cmp)(void *priv, struct list_head *a,
-					struct list_head *b),
-				struct list_head *head,
-				struct list_head *a, struct list_head *b)
+__attribute__((nonnull(2,3,4,5)))
+static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
+			struct list_head *a, struct list_head *b)
 {
 	struct list_head *tail = head;
 	u8 count = 0;
 
-	while (a && b) {
+	for (;;) {
 		/* if equal, take 'a' -- important for sort stability */
-		if ((*cmp)(priv, a, b) <= 0) {
+		if (cmp(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
+			tail = a;
			a = a->next;
+			if (!a)
+				break;
		} else {
			tail->next = b;
			b->prev = tail;
+			tail = b;
			b = b->next;
+			if (!b) {
+				b = a;
+				break;
+			}
		}
-		tail = tail->next;
 	}
-	tail->next = a ? : b;
 
+	/* Finish linking remainder of list b on to tail */
+	tail->next = b;
 	do {
 		/*
-		 * In worst cases this loop may run many iterations.
+		 * If the merge is highly unbalanced (e.g. the input is
+		 * already sorted), this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
-		if (unlikely(!(++count)))
-			(*cmp)(priv, tail->next, tail->next);
-
-		tail->next->prev = tail;
-		tail = tail->next;
-	} while (tail->next);
+		if (unlikely(!++count))
+			cmp(priv, b, b);
+		b->prev = tail;
+		tail = b;
+		b = b->next;
+	} while (b);
 
 	/* And the final links to make a circular doubly-linked list */
 	tail->next = head;
 	head->prev = tail;
 }
@@ -91,55 +107,149 @@ static void merge_and_restore_back_links(void *priv,
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
- * This function implements "merge sort", which has O(nlog(n))
- * complexity.
+ * The comparison function @cmp must return > 0 if @a should sort after
+ * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
+ * sort before @b *or* their original order should be preserved.  It is
+ * always called with the element that came first in the input in @a,
+ * and list_sort is a stable sort, so it is not necessary to distinguish
+ * the @a < @b and @a == @b cases.
 *
- * The comparison function @cmp must return a negative value if @a
- * should sort before @b, and a positive value if @a should sort after
- * @b. If @a and @b are equivalent, and their original relative
- * ordering is to be preserved, @cmp must return 0.
+ * This is compatible with two styles of @cmp function:
+ * - The traditional style which returns <0 / =0 / >0, or
+ * - Returning a boolean 0/1.
+ * The latter offers a chance to save a few cycles in the comparison
+ * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
+ *
+ * A good way to write a multi-word comparison is
+ *	if (a->high != b->high)
+ *		return a->high > b->high;
+ *	if (a->middle != b->middle)
+ *		return a->middle > b->middle;
+ *	return a->low > b->low;
+ *
+ * This mergesort is as eager as possible while always performing at least
+ * 2:1 balanced merges.  Given two pending sublists of size 2^k, they are
+ * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
+ *
+ * Thus, it will avoid cache thrashing as long as 3*2^k elements can
+ * fit into the cache.  Not quite as good as a fully-eager bottom-up
+ * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
+ * the common case that everything fits into L1.
+ *
+ * The merging is controlled by "count", the number of elements in the
+ * pending lists.  This is beautifully simple code, but rather subtle.
+ *
+ * Each time we increment "count", we set one bit (bit k) and clear
+ * bits k-1 .. 0.  Each time this happens (except the very first time
+ * for each bit, when count increments to 2^k), we merge two lists of
+ * size 2^k into one list of size 2^(k+1).
+ *
+ * This merge happens exactly when the count reaches an odd multiple of
+ * 2^k, which is when we have 2^k elements pending in smaller lists,
+ * so it's safe to merge away two lists of size 2^k.
+ *
+ * After this happens twice, we have created two lists of size 2^(k+1),
+ * which will be merged into a list of size 2^(k+2) before we create
+ * a third list of size 2^(k+1), so there are never more than two pending.
+ *
+ * The number of pending lists of size 2^k is determined by the
+ * state of bit k of "count" plus two extra pieces of information:
+ * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
+ * - Whether the higher-order bits are zero or non-zero (i.e.
+ *   is count >= 2^(k+1)).
+ * There are six states we distinguish.  "x" represents some arbitrary
+ * bits, and "y" represents some arbitrary non-zero bits:
+ * 0:  00x: 0 pending of size 2^k; x pending of sizes < 2^k
+ * 1:  01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ *    (merge and loop back to state 2)
+ *
+ * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
+ * bit k-1 is set while the more significant bits are non-zero) and
+ * merge them away in the 5->2 transition.  Note in particular that just
+ * before the 5->2 transition, all lower-order bits are 11 (state 3),
+ * so there is one list of each smaller size.
+ *
+ * When we reach the end of the input, we merge all the pending
+ * lists, from smallest to largest.  If you work through cases 2 to
+ * 5 above, you can see that the number of elements we merge with a list
+ * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
+ * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
 */
+__attribute__((nonnull(2,3)))
 void list_sort(void *priv, struct list_head *head,
	       int (*cmp)(void *priv, struct list_head *a,
			  struct list_head *b))
 {
-	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
-						-- last slot is a sentinel */
-	int lev;  /* index into part[] */
-	int max_lev = 0;
-	struct list_head *list;
+	struct list_head *list = head->next, *pending = NULL;
+	size_t count = 0;	/* Count of pending */
 
-	if (list_empty(head))
+	if (list == head->prev)	/* Zero or one elements */
		return;
 
-	memset(part, 0, sizeof(part));
-
+	/* Convert to a null-terminated singly-linked list. */
 	head->prev->next = NULL;
-	list = head->next;
 
-	while (list) {
-		struct list_head *cur = list;
+	/*
+	 * Data structure invariants:
+	 * - All lists are singly linked and null-terminated; prev
+	 *   pointers are not maintained.
+	 * - pending is a prev-linked "list of lists" of sorted
+	 *   sublists awaiting further merging.
+	 * - Each of the sorted sublists is power-of-two in size.
+	 * - Sublists are sorted by size and age, smallest & newest at front.
+	 * - There are zero to two sublists of each size.
+	 * - A pair of pending sublists are merged as soon as the number
+	 *   of following pending elements equals their size (i.e.
+	 *   each time count reaches an odd multiple of that size).
+	 *   That ensures each later final merge will be at worst 2:1.
+	 * - Each round consists of:
+	 *   - Merging the two sublists selected by the highest bit
+	 *     which flips when count is incremented, and
+	 *   - Adding an element from the input as a size-1 sublist.
+	 */
+	do {
+		size_t bits;
+		struct list_head **tail = &pending;
+
+		/* Find the least-significant clear bit in count */
+		for (bits = count; bits & 1; bits >>= 1)
+			tail = &(*tail)->prev;
+		/* Do the indicated merge */
+		if (likely(bits)) {
+			struct list_head *a = *tail, *b = a->prev;
+
+			a = merge(priv, (cmp_func)cmp, b, a);
+			/* Install the merged result in place of the inputs */
+			a->prev = b->prev;
+			*tail = a;
+		}
+
+		/* Move one element from input list to pending */
+		list->prev = pending;
+		pending = list;
		list = list->next;
-		cur->next = NULL;
+		pending->next = NULL;
+		count++;
+	} while (list);
 
-		for (lev = 0; part[lev]; lev++) {
-			cur = merge(priv, cmp, part[lev], cur);
-			part[lev] = NULL;
-		}
-		if (lev > max_lev) {
-			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
-				printk_once(KERN_DEBUG "list too long for efficiency\n");
-				lev--;
-			}
-			max_lev = lev;
-		}
-		part[lev] = cur;
-	}
+	/* End of input; merge together all the pending lists. */
+	list = pending;
+	pending = pending->prev;
+	for (;;) {
+		struct list_head *next = pending->prev;
 
-	for (lev = 0; lev < max_lev; lev++)
-		if (part[lev])
-			list = merge(priv, cmp, part[lev], list);
+		if (!next)
+			break;
+		list = merge(priv, (cmp_func)cmp, pending, list);
+		pending = next;
+	}
 
-	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
+	/* The final merge, rebuilding prev links */
+	merge_final(priv, (cmp_func)cmp, head, pending, list);
 }
 EXPORT_SYMBOL(list_sort);
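
A short usage sketch for the cmp contract documented above; the struct and field names are invented. The callback receives two list_head pointers, reaches the payload via container_of(), and the boolean "a > b" style is sufficient for a stable ascending sort:

#include <linux/list.h>
#include <linux/list_sort.h>

/* Hypothetical element type, for illustration. */
struct item {
	struct list_head node;
	int key;
};

/* Boolean style: nonzero iff @a must sort after @b; stability is free. */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = container_of(a, struct item, node);
	struct item *ib = container_of(b, struct item, node);

	return ia->key > ib->key;
}

/* Sort an existing list head in ascending key order:
 *	list_sort(NULL, &my_list, item_cmp);
 */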

lib/math/Kconfig (new file, 11 lines)
@@ -0,0 +1,11 @@
+config CORDIC
+	tristate "CORDIC algorithm"
+	help
+	  This option provides an implementation of the CORDIC algorithm;
+	  calculations are in fixed point. Module will be called cordic.
+
+config PRIME_NUMBERS
+	tristate
+
+config RATIONAL
+	bool

lib/math/Makefile (new file, 5 lines)
@@ -0,0 +1,5 @@
+obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o
+
+obj-$(CONFIG_CORDIC)		+= cordic.o
+obj-$(CONFIG_PRIME_NUMBERS)	+= prime_numbers.o
+obj-$(CONFIG_RATIONAL)		+= rational.o

lib/math/div64.c

@@ -10,7 +10,7 @@
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
- * The fast case for (n>>32 == 0) is handled inline by do_div(). 
+ * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific

lib/math/int_pow.c (new file, 32 lines)
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An integer based power function
+ *
+ * Derived from drivers/video/backlight/pwm_bl.c
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/**
+ * int_pow - computes the exponentiation of the given base and exponent
+ * @base: base which will be raised to the given power
+ * @exp: power to be raised to
+ *
+ * Computes: pow(base, exp), i.e. @base raised to the @exp power
+ */
+u64 int_pow(u64 base, unsigned int exp)
+{
+	u64 result = 1;
+
+	while (exp) {
+		if (exp & 1)
+			result *= base;
+		exp >>= 1;
+		base *= base;
+	}
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(int_pow);
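
The loop is binary exponentiation: it squares the base every round and multiplies it into the result whenever the current low bit of the exponent is set, so it needs only O(log exp) multiplies. A worked trace, computed by hand from the function above:

/* int_pow(3, 5): exp = 5 = 0b101
 *   bit 0 set   -> result = 1 * 3  = 3;    base = 3 * 3   = 9
 *   bit 1 clear -> result unchanged;       base = 9 * 9   = 81
 *   bit 2 set   -> result = 3 * 81 = 243;  base = 81 * 81 = 6561
 * exp exhausted -> returns 243 == 3^5, after three squarings
 * instead of four sequential multiplies.
 */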

lib/nlattr.c (208 lines changed)
@@ -69,7 +69,8 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
|
||||
|
||||
static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
|
||||
const struct nla_policy *policy,
|
||||
struct netlink_ext_ack *extack)
|
||||
struct netlink_ext_ack *extack,
|
||||
unsigned int validate)
|
||||
{
|
||||
const struct nlattr *entry;
|
||||
int rem;
|
||||
@@ -86,8 +87,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
ret = nla_validate(nla_data(entry), nla_len(entry),
|
||||
maxtype, policy, extack);
|
||||
ret = __nla_validate(nla_data(entry), nla_len(entry),
|
||||
maxtype, policy, validate, extack);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
@@ -154,13 +155,17 @@ static int nla_validate_int_range(const struct nla_policy *pt,
|
||||
}
|
||||
|
||||
static int validate_nla(const struct nlattr *nla, int maxtype,
|
||||
const struct nla_policy *policy,
|
||||
const struct nla_policy *policy, unsigned int validate,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
u16 strict_start_type = policy[0].strict_start_type;
|
||||
const struct nla_policy *pt;
|
||||
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
|
||||
int err = -ERANGE;
|
||||
|
||||
if (strict_start_type && type >= strict_start_type)
|
||||
validate |= NL_VALIDATE_STRICT;
|
||||
|
||||
if (type <= 0 || type > maxtype)
|
||||
return 0;
|
||||
|
||||
@@ -172,6 +177,26 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
|
||||
(pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
|
||||
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
|
||||
current->comm, type);
|
||||
if (validate & NL_VALIDATE_STRICT_ATTRS) {
|
||||
NL_SET_ERR_MSG_ATTR(extack, nla,
|
||||
"invalid attribute length");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (validate & NL_VALIDATE_NESTED) {
|
||||
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
|
||||
!(nla->nla_type & NLA_F_NESTED)) {
|
||||
NL_SET_ERR_MSG_ATTR(extack, nla,
|
||||
"NLA_F_NESTED is missing");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
|
||||
pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
|
||||
NL_SET_ERR_MSG_ATTR(extack, nla,
|
||||
"NLA_F_NESTED not expected");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
switch (pt->type) {
|
||||
@@ -244,8 +269,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
|
||||
if (attrlen < NLA_HDRLEN)
|
||||
goto out_err;
|
||||
if (pt->validation_data) {
|
||||
err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
|
||||
pt->validation_data, extack);
|
||||
err = __nla_validate(nla_data(nla), nla_len(nla), pt->len,
|
||||
pt->validation_data, validate,
|
||||
extack);
|
||||
if (err < 0) {
|
||||
/*
|
||||
* return directly to preserve the inner
|
||||
@@ -268,7 +294,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
|
||||
|
||||
err = nla_validate_array(nla_data(nla), nla_len(nla),
|
||||
pt->len, pt->validation_data,
|
||||
extack);
|
||||
extack, validate);
|
||||
if (err < 0) {
|
||||
/*
|
||||
* return directly to preserve the inner
|
||||
@@ -278,10 +304,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case NLA_UNSPEC:
|
||||
if (validate & NL_VALIDATE_UNSPEC) {
|
||||
NL_SET_ERR_MSG_ATTR(extack, nla,
|
||||
"Unsupported attribute");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* fall through */
|
||||
case NLA_MIN_LEN:
|
||||
if (attrlen < pt->len)
|
||||
goto out_err;
|
||||
break;
|
||||
|
||||
default:
|
||||
if (pt->len)
|
||||
minlen = pt->len;
|
||||
else if (pt->type != NLA_UNSPEC)
|
||||
else
|
||||
minlen = nla_attr_minlen[pt->type];
|
||||
|
||||
if (attrlen < minlen)
|
||||
@@ -315,37 +354,76 @@ out_err:
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* nla_validate - Validate a stream of attributes
|
||||
* @head: head of attribute stream
|
||||
* @len: length of attribute stream
|
||||
* @maxtype: maximum attribute type to be expected
|
||||
* @policy: validation policy
|
||||
* @extack: extended ACK report struct
|
||||
*
|
||||
* Validates all attributes in the specified attribute stream against the
|
||||
* specified policy. Attributes with a type exceeding maxtype will be
|
||||
 * ignored. See documentation of struct nla_policy for more details.
 *
 * Returns 0 on success or a negative error code.
 */
int nla_validate(const struct nlattr *head, int len, int maxtype,
		 const struct nla_policy *policy,
		 struct netlink_ext_ack *extack)
static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
				const struct nla_policy *policy,
				unsigned int validate,
				struct netlink_ext_ack *extack,
				struct nlattr **tb)
{
	const struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, head, len, rem) {
		int err = validate_nla(nla, maxtype, policy, extack);
	if (tb)
		memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

		if (err < 0)
			return err;
	nla_for_each_attr(nla, head, len, rem) {
		u16 type = nla_type(nla);

		if (type == 0 || type > maxtype) {
			if (validate & NL_VALIDATE_MAXTYPE) {
				NL_SET_ERR_MSG_ATTR(extack, nla,
						    "Unknown attribute type");
				return -EINVAL;
			}
			continue;
		}
		if (policy) {
			int err = validate_nla(nla, maxtype, policy,
					       validate, extack);

			if (err < 0)
				return err;
		}

		if (tb)
			tb[type] = (struct nlattr *)nla;
	}

	if (unlikely(rem > 0)) {
		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
				    rem, current->comm);
		NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
		if (validate & NL_VALIDATE_TRAILING)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nla_validate);

/**
 * __nla_validate - Validate a stream of attributes
 * @head: head of attribute stream
 * @len: length of attribute stream
 * @maxtype: maximum attribute type to be expected
 * @policy: validation policy
 * @validate: validation strictness
 * @extack: extended ACK report struct
 *
 * Validates all attributes in the specified attribute stream against the
 * specified policy. Validation depends on the validate flags passed, see
 * &enum netlink_validation for more details on that.
 * See documentation of struct nla_policy for more details.
 *
 * Returns 0 on success or a negative error code.
 */
int __nla_validate(const struct nlattr *head, int len, int maxtype,
		   const struct nla_policy *policy, unsigned int validate,
		   struct netlink_ext_ack *extack)
{
	return __nla_validate_parse(head, len, maxtype, policy, validate,
				    extack, NULL);
}
EXPORT_SYMBOL(__nla_validate);

/**
 * nla_policy_len - Determine the max. length of a policy
@@ -377,76 +455,30 @@ nla_policy_len(const struct nla_policy *p, int n)
EXPORT_SYMBOL(nla_policy_len);

/**
 * nla_parse - Parse a stream of attributes into a tb buffer
 * __nla_parse - Parse a stream of attributes into a tb buffer
 * @tb: destination array with maxtype+1 elements
 * @maxtype: maximum attribute type to be expected
 * @head: head of attribute stream
 * @len: length of attribute stream
 * @policy: validation policy
 * @validate: validation strictness
 * @extack: extended ACK pointer
 *
 * Parses a stream of attributes and stores a pointer to each attribute in
 * the tb array accessible via the attribute type. Attributes with a type
 * exceeding maxtype will be silently ignored for backwards compatibility
 * reasons. policy may be set to NULL if no validation is required.
 * the tb array accessible via the attribute type.
 * Validation is controlled by the @validate parameter.
 *
 * Returns 0 on success or a negative error code.
 */
static int __nla_parse(struct nlattr **tb, int maxtype,
		       const struct nlattr *head, int len,
		       bool strict, const struct nla_policy *policy,
		       struct netlink_ext_ack *extack)
int __nla_parse(struct nlattr **tb, int maxtype,
		const struct nlattr *head, int len,
		const struct nla_policy *policy, unsigned int validate,
		struct netlink_ext_ack *extack)
{
	const struct nlattr *nla;
	int rem;

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

	nla_for_each_attr(nla, head, len, rem) {
		u16 type = nla_type(nla);

		if (type == 0 || type > maxtype) {
			if (strict) {
				NL_SET_ERR_MSG(extack, "Unknown attribute type");
				return -EINVAL;
			}
			continue;
		}
		if (policy) {
			int err = validate_nla(nla, maxtype, policy, extack);

			if (err < 0)
				return err;
		}

		tb[type] = (struct nlattr *)nla;
	}

	if (unlikely(rem > 0)) {
		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
				    rem, current->comm);
		NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
		if (strict)
			return -EINVAL;
	}

	return 0;
	return __nla_validate_parse(head, len, maxtype, policy, validate,
				    extack, tb);
}

int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
	      int len, const struct nla_policy *policy,
	      struct netlink_ext_ack *extack)
{
	return __nla_parse(tb, maxtype, head, len, false, policy, extack);
}
EXPORT_SYMBOL(nla_parse);

int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
		     int len, const struct nla_policy *policy,
		     struct netlink_ext_ack *extack)
{
	return __nla_parse(tb, maxtype, head, len, true, policy, extack);
}
EXPORT_SYMBOL(nla_parse_strict);
EXPORT_SYMBOL(__nla_parse);

/**
 * nla_find - Find a specific attribute in a stream of attributes
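For context, a minimal caller sketch of the nla_parse() API touched above. The
DEMO_ATTR_* constants and demo_policy are hypothetical names invented for
illustration; only nla_parse(), nlmsg_attrdata(), nlmsg_attrlen() and
nla_get_u32() are real kernel interfaces.

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
	[DEMO_ATTR_NAME]  = { .type = NLA_NUL_STRING, .len = 15 },
};

static int demo_parse(const struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];
	int err;

	/* Fills tb[] with one pointer per attribute type, validating
	 * each attribute against demo_policy along the way.
	 */
	err = nla_parse(tb, DEMO_ATTR_MAX, nlmsg_attrdata(nlh, 0),
			nlmsg_attrlen(nlh, 0), demo_policy, extack);
	if (err < 0)
		return err;

	if (tb[DEMO_ATTR_VALUE])
		pr_info("value=%u\n", nla_get_u32(tb[DEMO_ATTR_VALUE]));
	return 0;
}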
213
lib/packing.c
Normal file
@@ -0,0 +1,213 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018, NXP Semiconductors
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/packing.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

static int get_le_offset(int offset)
{
	int closest_multiple_of_4;

	closest_multiple_of_4 = (offset / 4) * 4;
	offset -= closest_multiple_of_4;
	return closest_multiple_of_4 + (3 - offset);
}

static int get_reverse_lsw32_offset(int offset, size_t len)
{
	int closest_multiple_of_4;
	int word_index;

	word_index = offset / 4;
	closest_multiple_of_4 = word_index * 4;
	offset -= closest_multiple_of_4;
	word_index = (len / 4) - word_index - 1;
	return word_index * 4 + offset;
}

static u64 bit_reverse(u64 val, unsigned int width)
{
	u64 new_val = 0;
	unsigned int bit;
	unsigned int i;

	for (i = 0; i < width; i++) {
		/* Use 64-bit shifts so that widths above 31 bits work */
		bit = (val & (1ULL << i)) != 0;
		new_val |= ((u64)bit << (width - i - 1));
	}
	return new_val;
}

static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
				       int *box_end_bit, u8 *box_mask)
{
	int box_bit_width = *box_start_bit - *box_end_bit + 1;
	int new_box_start_bit, new_box_end_bit;

	*to_write >>= *box_end_bit;
	*to_write = bit_reverse(*to_write, box_bit_width);
	*to_write <<= *box_end_bit;

	new_box_end_bit = box_bit_width - *box_start_bit - 1;
	new_box_start_bit = box_bit_width - *box_end_bit - 1;
	*box_mask = GENMASK_ULL(new_box_start_bit, new_box_end_bit);
	*box_start_bit = new_box_start_bit;
	*box_end_bit = new_box_end_bit;
}

/**
 * packing - Convert numbers (currently u64) between a packed and an unpacked
 *	     format. Unpacked means laid out in memory in the CPU's native
 *	     understanding of integers, while packed means anything else that
 *	     requires translation.
 *
 * @pbuf: Pointer to a buffer holding the packed value.
 * @uval: Pointer to an u64 holding the unpacked value.
 * @startbit: The index (in logical notation, compensated for quirks) where
 *	      the packed value starts within pbuf. Must be larger than, or
 *	      equal to, endbit.
 * @endbit: The index (in logical notation, compensated for quirks) where
 *	    the packed value ends within pbuf. Must be smaller than, or equal
 *	    to, startbit.
 * @op: If PACK, then uval will be treated as const pointer and copied (packed)
 *	into pbuf, between startbit and endbit.
 *	If UNPACK, then pbuf will be treated as const pointer and the logical
 *	value between startbit and endbit will be copied (unpacked) to uval.
 * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
 *	    QUIRK_MSB_ON_THE_RIGHT.
 *
 * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
 *	   correct usage, return code may be discarded.
 *	   If op is PACK, pbuf is modified.
 *	   If op is UNPACK, uval is modified.
 */
int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
	    enum packing_op op, u8 quirks)
{
	/* Number of bits for storing "uval"
	 * also width of the field to access in the pbuf
	 */
	u64 value_width;
	/* Logical byte indices corresponding to the
	 * start and end of the field.
	 */
	int plogical_first_u8, plogical_last_u8, box;

	/* startbit is expected to be larger than endbit */
	if (startbit < endbit)
		/* Invalid function call */
		return -EINVAL;

	value_width = startbit - endbit + 1;
	if (value_width > 64)
		return -ERANGE;

	/* Check if "uval" fits in "value_width" bits.
	 * If value_width is 64, the check will fail, but any
	 * 64-bit uval will surely fit.
	 */
	if (op == PACK && value_width < 64 && (*uval >= (1ull << value_width)))
		/* Cannot store "uval" inside "value_width" bits.
		 * Truncating "uval" is most certainly not desirable,
		 * so simply erroring out is appropriate.
		 */
		return -ERANGE;

	/* Initialize parameter */
	if (op == UNPACK)
		*uval = 0;

	/* Iterate through an idealistic view of the pbuf as an u64 with
	 * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
	 * logical bit significance. "box" denotes the current logical u8.
	 */
	plogical_first_u8 = startbit / 8;
	plogical_last_u8 = endbit / 8;

	for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
		/* Bit indices into the currently accessed 8-bit box */
		int box_start_bit, box_end_bit, box_addr;
		u8 box_mask;
		/* Corresponding bits from the unpacked u64 parameter */
		int proj_start_bit, proj_end_bit;
		u64 proj_mask;

		/* This u8 may need to be accessed in its entirety
		 * (from bit 7 to bit 0), or not, depending on the
		 * input arguments startbit and endbit.
		 */
		if (box == plogical_first_u8)
			box_start_bit = startbit % 8;
		else
			box_start_bit = 7;
		if (box == plogical_last_u8)
			box_end_bit = endbit % 8;
		else
			box_end_bit = 0;

		/* We have determined the box bit start and end.
		 * Now we calculate where this (masked) u8 box would fit
		 * in the unpacked (CPU-readable) u64 - the u8 box's
		 * projection onto the unpacked u64. Though the
		 * box is u8, the projection is u64 because it may fall
		 * anywhere within the unpacked u64.
		 */
		proj_start_bit = ((box * 8) + box_start_bit) - endbit;
		proj_end_bit = ((box * 8) + box_end_bit) - endbit;
		proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit);
		box_mask = GENMASK_ULL(box_start_bit, box_end_bit);

		/* Determine the offset of the u8 box inside the pbuf,
		 * adjusted for quirks. The adjusted box_addr will be used for
		 * effective addressing inside the pbuf (so it's not
		 * logical any longer).
		 */
		box_addr = pbuflen - box - 1;
		if (quirks & QUIRK_LITTLE_ENDIAN)
			box_addr = get_le_offset(box_addr);
		if (quirks & QUIRK_LSW32_IS_FIRST)
			box_addr = get_reverse_lsw32_offset(box_addr,
							    pbuflen);

		if (op == UNPACK) {
			u64 pval;

			/* Read from pbuf, write to uval */
			pval = ((u8 *)pbuf)[box_addr] & box_mask;
			if (quirks & QUIRK_MSB_ON_THE_RIGHT)
				adjust_for_msb_right_quirk(&pval,
							   &box_start_bit,
							   &box_end_bit,
							   &box_mask);

			pval >>= box_end_bit;
			pval <<= proj_end_bit;
			*uval &= ~proj_mask;
			*uval |= pval;
		} else {
			u64 pval;

			/* Write to pbuf, read from uval */
			pval = (*uval) & proj_mask;
			pval >>= proj_end_bit;
			if (quirks & QUIRK_MSB_ON_THE_RIGHT)
				adjust_for_msb_right_quirk(&pval,
							   &box_start_bit,
							   &box_end_bit,
							   &box_mask);

			pval <<= box_end_bit;
			((u8 *)pbuf)[box_addr] &= ~box_mask;
			((u8 *)pbuf)[box_addr] |= pval;
		}
	}
	return 0;
}
EXPORT_SYMBOL(packing);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
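As a usage sketch (the register layout here is hypothetical, not taken from
this commit): packing a 5-bit field that a data sheet documents at bits 36:32
of a 64-bit register image stored with the LSW32_IS_FIRST quirk might look
like this.

	u8 regbuf[8] = {};
	u64 speed = 0x15;
	int err;

	err = packing(regbuf, &speed, 36, 32, sizeof(regbuf),
		      PACK, QUIRK_LSW32_IS_FIRST);
	if (err)
		return err;

	/* The reverse direction uses the same bit indices */
	err = packing(regbuf, &speed, 36, 32, sizeof(regbuf),
		      UNPACK, QUIRK_LSW32_IS_FIRST);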
@@ -151,7 +151,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
@@ -333,7 +333,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);
		  "%s called more than once on %ps!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);

@@ -26,7 +26,7 @@
#include <linux/bug.h>
#include <linux/plist.h>

#ifdef CONFIG_DEBUG_PI_LIST
#ifdef CONFIG_DEBUG_PLIST

static struct plist_head test_head;

@@ -173,7 +173,7 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
	plist_check_head(head);
}

#ifdef CONFIG_DEBUG_PI_LIST
#ifdef CONFIG_DEBUG_PLIST
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/module.h>
208
lib/rhashtable.c
@@ -31,11 +31,10 @@

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
	struct rhash_lock_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
@@ -56,9 +55,11 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
@@ -104,7 +105,6 @@ static void bucket_table_free(const struct bucket_table *tbl)
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

@@ -131,9 +131,11 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
	if (cmpxchg(prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
@@ -169,11 +171,11 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	size_t size;
	int i;
	static struct lock_class_key __key;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kvzalloc(size, gfp);
	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

@@ -185,18 +187,11 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();
@@ -220,14 +215,15 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
@@ -235,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

@@ -250,18 +247,19 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);
	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);
	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	rcu_assign_pointer(*pprev, next);
	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
@@ -271,20 +269,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
	if (err == -ENOENT)
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);
	rht_unlock(old_tbl, bkt);

	return err;
}
@@ -330,13 +327,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
@@ -478,6 +478,7 @@ fail:
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
@@ -485,13 +486,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

@@ -513,7 +513,11 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}
@@ -525,12 +529,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head __rcu **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

@@ -553,11 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
@@ -567,7 +567,10 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);
	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
@@ -581,47 +584,35 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);
	new_tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
		}

		spin_unlock_bh(lock);
			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
@@ -943,10 +934,11 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
@@ -1046,11 +1038,6 @@ int rhashtable_init(struct rhashtable *ht,

	size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;
@@ -1152,7 +1139,7 @@ restart:
		struct rhash_head *pos, *next;

		cond_resched();
		for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
		for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
		     next = !rht_is_a_nulls(pos) ?
			rht_dereference(pos->next, ht) : NULL;
		     !rht_is_a_nulls(pos);
@@ -1179,11 +1166,10 @@ void rhashtable_destroy(struct rhashtable *ht)
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull;
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
@@ -1201,20 +1187,28 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
		subhash >>= shift;
	}

	if (!ntbl) {
		if (!rhnull)
			INIT_RHT_NULLS_HEAD(rhnull);
		return &rhnull;
	}
	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;

}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
						 unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
							 struct bucket_table *tbl,
							 unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
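The locking model after this change, as a short sketch grounded in the calls
above: the per-bucket spinlock array is gone, and the lock is now bit 0 of
the bucket head pointer itself, taken with rht_lock() and dropped with
rht_unlock() or, when the head changes, rht_assign_unlock(). A minimal
writer-side pattern looks roughly like:

	struct rhash_lock_head __rcu **bkt = rht_bucket_var(tbl, hash);

	if (bkt) {
		rht_lock(tbl, bkt);
		/* Walk or modify the chain via rht_ptr(bkt, tbl, hash).
		 * Writers that replace the head must use
		 * rht_assign_locked()/rht_assign_unlock() so the lock
		 * bit embedded in *bkt is preserved.
		 */
		rht_unlock(tbl, bkt);
	}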
@@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
						  bytemask_from_count(left)));
#else
	switch (left) {
	case 7: b |= ((u64)end[6]) << 48;
	case 6: b |= ((u64)end[5]) << 40;
	case 5: b |= ((u64)end[4]) << 32;
	case 7: b |= ((u64)end[6]) << 48; /* fall through */
	case 6: b |= ((u64)end[5]) << 40; /* fall through */
	case 5: b |= ((u64)end[4]) << 32; /* fall through */
	case 4: b |= le32_to_cpup(data); break;
	case 3: b |= ((u64)end[2]) << 16;
	case 3: b |= ((u64)end[2]) << 16; /* fall through */
	case 2: b |= le16_to_cpup(data); break;
	case 1: b |= end[0];
	}
@@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
						  bytemask_from_count(left)));
#else
	switch (left) {
	case 7: b |= ((u64)end[6]) << 48;
	case 6: b |= ((u64)end[5]) << 40;
	case 5: b |= ((u64)end[4]) << 32;
	case 7: b |= ((u64)end[6]) << 48; /* fall through */
	case 6: b |= ((u64)end[5]) << 40; /* fall through */
	case 5: b |= ((u64)end[4]) << 32; /* fall through */
	case 4: b |= get_unaligned_le32(end); break;
	case 3: b |= ((u64)end[2]) << 16;
	case 3: b |= ((u64)end[2]) << 16; /* fall through */
	case 2: b |= get_unaligned_le16(end); break;
	case 1: b |= end[0];
	}
@@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
						  bytemask_from_count(left)));
#else
	switch (left) {
	case 7: b |= ((u64)end[6]) << 48;
	case 6: b |= ((u64)end[5]) << 40;
	case 5: b |= ((u64)end[4]) << 32;
	case 7: b |= ((u64)end[6]) << 48; /* fall through */
	case 6: b |= ((u64)end[5]) << 40; /* fall through */
	case 5: b |= ((u64)end[4]) << 32; /* fall through */
	case 4: b |= le32_to_cpup(data); break;
	case 3: b |= ((u64)end[2]) << 16;
	case 3: b |= ((u64)end[2]) << 16; /* fall through */
	case 2: b |= le16_to_cpup(data); break;
	case 1: b |= end[0];
	}
@@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
						  bytemask_from_count(left)));
#else
	switch (left) {
	case 7: b |= ((u64)end[6]) << 48;
	case 6: b |= ((u64)end[5]) << 40;
	case 5: b |= ((u64)end[4]) << 32;
	case 7: b |= ((u64)end[6]) << 48; /* fall through */
	case 6: b |= ((u64)end[5]) << 40; /* fall through */
	case 5: b |= ((u64)end[4]) << 32; /* fall through */
	case 4: b |= get_unaligned_le32(end); break;
	case 3: b |= ((u64)end[2]) << 16;
	case 3: b |= ((u64)end[2]) << 16; /* fall through */
	case 2: b |= get_unaligned_le16(end); break;
	case 1: b |= end[0];
	}
@@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
		v0 ^= m;
	}
	switch (left) {
	case 3: b |= ((u32)end[2]) << 16;
	case 3: b |= ((u32)end[2]) << 16; /* fall through */
	case 2: b |= le16_to_cpup(data); break;
	case 1: b |= end[0];
	}
@@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
		v0 ^= m;
	}
	switch (left) {
	case 3: b |= ((u32)end[2]) << 16;
	case 3: b |= ((u32)end[2]) << 16; /* fall through */
	case 2: b |= get_unaligned_le16(end); break;
	case 1: b |= end[0];
	}
266
lib/sort.c
@@ -1,8 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -11,35 +16,155 @@
#include <linux/export.h>
#include <linux/sort.h>

static int alignment_ok(const void *base, int align)
/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, int size)
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a, @b: pointers to the elements
 * @size: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a, @b: pointers to the elements
 * @size: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a, @b: pointers to the elements
 * @size: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

typedef void (*swap_func_t)(void *a, void *b, int size);

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES    (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought. Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2. But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit. That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}

/**
@@ -50,57 +175,78 @@ static void generic_swap(void *a, void *b, int size)
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 * This function does a heapsort on the given array. You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = generic_swap;
			swap_func = SWAP_BYTES;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
			    cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
			    cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap. This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down. (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = cmp_func(base + c, base + d) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && cmp_func(base + a, base + b) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}

EXPORT_SYMBOL(sort);
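A minimal caller sketch for the reworked sort() above (cmp_int is a
hypothetical comparison function; passing NULL for swap_func selects the
built-in word swap, here SWAP_WORDS_32, since the element size is a
multiple of 4):

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

	int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };

	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
	/* v is now { 1, 1, 2, 3, 4, 5, 6, 9 } */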
47
lib/string.c
@@ -159,11 +159,9 @@ EXPORT_SYMBOL(strlcpy);
 * @src: Where to copy the string from
 * @count: Size of destination buffer
 *
 * Copy the string, or as much of it as fits, into the dest buffer.
 * The routine returns the number of characters copied (not including
 * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
 * The behavior is undefined if the string buffers overlap.
 * The destination buffer is always NUL terminated, unless it's zero-sized.
 * Copy the string, or as much of it as fits, into the dest buffer. The
 * behavior is undefined if the string buffers overlap. The destination
 * buffer is always NUL terminated, unless it's zero-sized.
 *
 * Preferred to strlcpy() since the API doesn't require reading memory
 * from the src string beyond the specified "count" bytes, and since
@@ -173,8 +171,10 @@ EXPORT_SYMBOL(strlcpy);
 *
 * Preferred to strncpy() since it always returns a valid string, and
 * doesn't unnecessarily force the tail of the destination buffer to be
 * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
 * with an overflow test, then just memset() the tail of the dest buffer.
 * zeroed. If zeroing is desired please use strscpy_pad().
 *
 * Return: The number of characters copied (not including the trailing
 *	   %NUL) or -E2BIG if the destination buffer wasn't big enough.
 */
ssize_t strscpy(char *dest, const char *src, size_t count)
{
@@ -237,6 +237,39 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strscpy);
#endif

/**
 * strscpy_pad() - Copy a C-string into a sized buffer
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @count: Size of destination buffer
 *
 * Copy the string, or as much of it as fits, into the dest buffer. The
 * behavior is undefined if the string buffers overlap. The destination
 * buffer is always %NUL terminated, unless it's zero-sized.
 *
 * If the source string is shorter than the destination buffer, zeros
 * the tail of the destination buffer.
 *
 * For full explanation of why you may want to consider using the
 * 'strscpy' functions please see the function docstring for strscpy().
 *
 * Return: The number of characters copied (not including the trailing
 *	   %NUL) or -E2BIG if the destination buffer wasn't big enough.
 */
ssize_t strscpy_pad(char *dest, const char *src, size_t count)
{
	ssize_t written;

	written = strscpy(dest, src, count);
	if (written < 0 || written == count - 1)
		return written;

	memset(dest + written + 1, 0, count - written - 1);

	return written;
}
EXPORT_SYMBOL(strscpy_pad);

#ifndef __HAVE_ARCH_STRCAT
/**
 * strcat - Append one %NUL-terminated string to another
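A usage sketch of the new helper (the buffer name is invented for
illustration):

	char name[8];
	ssize_t n;

	n = strscpy_pad(name, "eth0", sizeof(name));
	/* n == 4; name[0..3] hold "eth0", name[4..7] are all '\0' */
	if (n < 0)
		return n;	/* -E2BIG: src didn't fit */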
@@ -11,6 +11,9 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "../tools/testing/selftests/kselftest_module.h"

static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
@@ -224,7 +227,8 @@ static const unsigned long exp[] __initconst = {
	BITMAP_FROM_U64(0xffffffff),
	BITMAP_FROM_U64(0xfffffffe),
	BITMAP_FROM_U64(0x3333333311111111ULL),
	BITMAP_FROM_U64(0xffffffff77777777ULL)
	BITMAP_FROM_U64(0xffffffff77777777ULL),
	BITMAP_FROM_U64(0),
};

static const unsigned long exp2[] __initconst = {
@@ -247,55 +251,93 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
	{0, "1-31:4/4", &exp[9 * step], 32, 0},
	{0, "0-31:1/4,32-63:2/4", &exp[10 * step], 64, 0},
	{0, "0-31:3/4,32-63:4/4", &exp[11 * step], 64, 0},
	{0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp[11 * step], 64, 0},

	{0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0},

	{0, "0-2047:128/256", NULL, 2048, PARSE_TIME},

	{0, "", &exp[12 * step], 8, 0},
	{0, "\n", &exp[12 * step], 8, 0},
	{0, ",, ,, , , ,", &exp[12 * step], 8, 0},
	{0, " , ,, , , ", &exp[12 * step], 8, 0},
	{0, " , ,, , , \n", &exp[12 * step], 8, 0},

	{-EINVAL, "-1", NULL, 8, 0},
	{-EINVAL, "-0", NULL, 8, 0},
	{-EINVAL, "10-1", NULL, 8, 0},
	{-EINVAL, "0-31:", NULL, 8, 0},
	{-EINVAL, "0-31:0", NULL, 8, 0},
	{-EINVAL, "0-31:0/", NULL, 8, 0},
	{-EINVAL, "0-31:0/0", NULL, 8, 0},
	{-EINVAL, "0-31:1/0", NULL, 8, 0},
	{-EINVAL, "0-31:10/1", NULL, 8, 0},
	{-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0},

	{-EINVAL, "a-31", NULL, 8, 0},
	{-EINVAL, "0-a1", NULL, 8, 0},
	{-EINVAL, "a-31:10/1", NULL, 8, 0},
	{-EINVAL, "0-31:a/1", NULL, 8, 0},
	{-EINVAL, "0-\n", NULL, 8, 0},
};

static void __init test_bitmap_parselist(void)
static void __init __test_bitmap_parselist(int is_user)
{
	int i;
	int err;
	cycles_t cycles;
	ktime_t time;
	DECLARE_BITMAP(bmap, 2048);
	char *mode = is_user ? "_user"  : "";

	for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
#define ptest parselist_tests[i]

		cycles = get_cycles();
		err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
		cycles = get_cycles() - cycles;
		if (is_user) {
			mm_segment_t orig_fs = get_fs();
			size_t len = strlen(ptest.in);

			set_fs(KERNEL_DS);
			time = ktime_get();
			err = bitmap_parselist_user(ptest.in, len,
						    bmap, ptest.nbits);
			time = ktime_get() - time;
			set_fs(orig_fs);
		} else {
			time = ktime_get();
			err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
			time = ktime_get() - time;
		}

		if (err != ptest.errno) {
			pr_err("test %d: input is %s, errno is %d, expected %d\n",
					i, ptest.in, err, ptest.errno);
			pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n",
					mode, i, ptest.in, err, ptest.errno);
			continue;
		}

		if (!err && ptest.expected
			 && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
			pr_err("test %d: input is %s, result is 0x%lx, expected 0x%lx\n",
					i, ptest.in, bmap[0], *ptest.expected);
			pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
					mode, i, ptest.in, bmap[0],
					*ptest.expected);
			continue;
		}

		if (ptest.flags & PARSE_TIME)
			pr_err("test %d: input is '%s' OK, Time: %llu\n",
					i, ptest.in,
					(unsigned long long)cycles);
			pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
					mode, i, ptest.in, time);
	}
}

static void __init test_bitmap_parselist(void)
{
	__test_bitmap_parselist(0);
}

static void __init test_bitmap_parselist_user(void)
{
	__test_bitmap_parselist(1);
}

#define EXP_BYTES	(sizeof(exp) * 8)

static void __init test_bitmap_arr32(void)
@@ -361,30 +403,17 @@ static void noinline __init test_mem_optimisations(void)
	}
}

static int __init test_bitmap_init(void)
static void __init selftest(void)
{
	test_zero_clear();
	test_fill_set();
	test_copy();
	test_bitmap_arr32();
	test_bitmap_parselist();
	test_bitmap_parselist_user();
	test_mem_optimisations();

	if (failed_tests == 0)
		pr_info("all %u tests passed\n", total_tests);
	else
		pr_warn("failed %u out of %u tests\n",
			failed_tests, total_tests);

	return failed_tests ? -EINVAL : 0;
}

static void __exit test_bitmap_cleanup(void)
{
}

module_init(test_bitmap_init);
module_exit(test_bitmap_cleanup);

KSTM_MODULE_LOADERS(test_bitmap);
MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>");
MODULE_LICENSE("GPL");
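For reference, a sketch of what each table entry above exercises, using the
"start-end:used/group" region syntax accepted by bitmap_parselist():

	DECLARE_BITMAP(map, 64);
	int err;

	/* Sets bits 0,4,8,... (1 bit used out of every 4) in 0-31,
	 * and bits 32,33,36,37,... (2 of every 4) in 32-63.
	 */
	err = bitmap_parselist("0-31:1/4,32-63:2/4", map, 64);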
@@ -21,6 +21,8 @@
#include <linux/gfp.h>
#include <linux/mm.h>

#include "../tools/testing/selftests/kselftest_module.h"

#define BUF_SIZE 256
#define PAD_SIZE 16
#define FILL_CHAR '$'
@@ -239,6 +241,7 @@ plain_format(void)
#define PTR ((void *)0x456789ab)
#define PTR_STR "456789ab"
#define PTR_VAL_NO_CRNG "(ptrval)"
#define ZEROS ""

static int __init
plain_format(void)
@@ -268,7 +271,6 @@ plain_hash_to_buffer(const void *p, char *buf, size_t len)
	return 0;
}


static int __init
plain_hash(void)
{
@@ -325,6 +327,24 @@ test_hashed(const char *fmt, const void *p)
	test(buf, fmt, p);
}

static void __init
null_pointer(void)
{
	test_hashed("%p", NULL);
	test(ZEROS "00000000", "%px", NULL);
	test("(null)", "%pE", NULL);
}

#define PTR_INVALID ((void *)0x000000ab)

static void __init
invalid_pointer(void)
{
	test_hashed("%p", PTR_INVALID);
	test(ZEROS "000000ab", "%px", PTR_INVALID);
	test("(efault)", "%pE", PTR_INVALID);
}

static void __init
symbol_ptr(void)
{
@@ -462,8 +482,7 @@ struct_rtc_time(void)
		.tm_year = 118,
	};

	test_hashed("%pt", &tm);

	test("(%ptR?)", "%pt", &tm);
	test("2018-11-26T05:35:43", "%ptR", &tm);
	test("0118-10-26T05:35:43", "%ptRr", &tm);
	test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
@@ -481,14 +500,14 @@ static void __init
large_bitmap(void)
{
	const int nbits = 1 << 16;
	unsigned long *bits = kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
	unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!bits)
		return;

	bitmap_set(bits, 1, 20);
	bitmap_set(bits, 60000, 15);
	test("1-20,60000-60014", "%*pbl", nbits, bits);
	kfree(bits);
	bitmap_free(bits);
}

static void __init
@@ -572,6 +591,8 @@ static void __init
test_pointer(void)
{
	plain();
	null_pointer();
	invalid_pointer();
	symbol_ptr();
	kernel_ptr();
	struct_resource();
@@ -590,12 +611,11 @@ test_pointer(void)
	flags();
}

static int __init
test_printf_init(void)
static void __init selftest(void)
{
	alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
	if (!alloced_buffer)
		return -ENOMEM;
		return;
	test_buffer = alloced_buffer + PAD_SIZE;

	test_basic();
@@ -604,16 +624,8 @@ test_printf_init(void)
	test_pointer();

	kfree(alloced_buffer);

	if (failed_tests == 0)
		pr_info("all %u tests passed\n", total_tests);
	else
		pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);

	return failed_tests ? -EINVAL : 0;
}

module_init(test_printf_init);

KSTM_MODULE_LOADERS(test_printf);
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
MODULE_LICENSE("GPL");
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
		struct rhash_head *pos, *next;
		struct test_obj_rhl *p;

		pos = rht_dereference(tbl->buckets[i], ht);
		pos = rht_ptr_exclusive(tbl->buckets + i);
		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;

		if (!rht_is_a_nulls(pos)) {
150
lib/test_strscpy.c
Normal file
@@ -0,0 +1,150 @@
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>

#include "../tools/testing/selftests/kselftest_module.h"

/*
 * Kernel module for testing 'strscpy' family of functions.
 */

KSTM_MODULE_GLOBALS();

/*
 * tc() - Run a specific test case.
 * @src: Source string, argument to strscpy_pad()
 * @count: Size of destination buffer, argument to strscpy_pad()
 * @expected: Expected return value from call to strscpy_pad()
 * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
 * @chars: Number of characters from the src string expected to be
 *         written to the dst buffer.
 * @pad: Number of pad characters expected (in the tail of dst buffer).
 *       (@pad does not include the null terminator byte.)
 *
 * Calls strscpy_pad() and verifies the return value and state of the
 * destination buffer after the call returns.
 */
static int __init tc(char *src, int count, int expected,
		     int chars, int terminator, int pad)
{
	int nr_bytes_poison;
	int max_expected;
	int max_count;
	int written;
	char buf[6];
	int index, i;
	const char POISON = 'z';

	total_tests++;

	if (!src) {
		pr_err("null source string not supported\n");
		return -1;
	}

	memset(buf, POISON, sizeof(buf));
	/* Future proofing test suite, validate args */
	max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
	max_expected = count - 1; /* Space for the null */
	if (count > max_count) {
		pr_err("count (%d) is too big (%d) ... aborting", count, max_count);
		return -1;
	}
	if (expected > max_expected) {
		pr_warn("expected (%d) is bigger than can possibly be returned (%d)",
			expected, max_expected);
	}

	written = strscpy_pad(buf, src, count);
	if (written != expected) {
		pr_err("%d != %d (written, expected)\n", written, expected);
		goto fail;
	}

	if (count && written == -E2BIG) {
		if (strncmp(buf, src, count - 1) != 0) {
			pr_err("buffer state invalid for -E2BIG\n");
			goto fail;
		}
		if (buf[count - 1] != '\0') {
			pr_err("too big string is not null terminated correctly\n");
			goto fail;
		}
	}

	for (i = 0; i < chars; i++) {
		if (buf[i] != src[i]) {
			pr_err("buf[i]==%c != src[i]==%c\n", buf[i], src[i]);
			goto fail;
		}
	}

	if (terminator) {
		if (buf[count - 1] != '\0') {
			pr_err("string is not null terminated correctly\n");
			goto fail;
		}
	}

	for (i = 0; i < pad; i++) {
		index = chars + terminator + i;
		if (buf[index] != '\0') {
			pr_err("padding missing at index: %d\n", i);
			goto fail;
		}
	}

	nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
	for (i = 0; i < nr_bytes_poison; i++) {
		index = sizeof(buf) - 1 - i; /* Check from the end back */
		if (buf[index] != POISON) {
			pr_err("poison value missing at index: %d\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	failed_tests++;
	return -1;
}

static void __init selftest(void)
{
	/*
	 * tc() uses a destination buffer of size 6 and needs at
	 * least 2 characters spare (one for null and one to check for
	 * overflow). This means we should only call tc() with
	 * strings up to a maximum of 4 characters long and 'count'
	 * should not exceed 4. To test with longer strings increase
	 * the buffer size in tc().
	 */

	/* tc(src, count, expected, chars, terminator, pad) */
	KSTM_CHECK_ZERO(tc("a", 0, -E2BIG, 0, 0, 0));
	KSTM_CHECK_ZERO(tc("", 0, -E2BIG, 0, 0, 0));

	KSTM_CHECK_ZERO(tc("a", 1, -E2BIG, 0, 1, 0));
	KSTM_CHECK_ZERO(tc("", 1, 0, 0, 1, 0));

	KSTM_CHECK_ZERO(tc("ab", 2, -E2BIG, 1, 1, 0));
	KSTM_CHECK_ZERO(tc("a", 2, 1, 1, 1, 0));
	KSTM_CHECK_ZERO(tc("", 2, 0, 0, 1, 1));

	KSTM_CHECK_ZERO(tc("abc", 3, -E2BIG, 2, 1, 0));
	KSTM_CHECK_ZERO(tc("ab", 3, 2, 2, 1, 0));
	KSTM_CHECK_ZERO(tc("a", 3, 1, 1, 1, 1));
	KSTM_CHECK_ZERO(tc("", 3, 0, 0, 1, 2));

	KSTM_CHECK_ZERO(tc("abcd", 4, -E2BIG, 3, 1, 0));
	KSTM_CHECK_ZERO(tc("abc", 4, 3, 3, 1, 0));
	KSTM_CHECK_ZERO(tc("ab", 4, 2, 2, 1, 1));
	KSTM_CHECK_ZERO(tc("a", 4, 1, 1, 1, 2));
	KSTM_CHECK_ZERO(tc("", 4, 0, 0, 1, 3));
}

KSTM_MODULE_LOADERS(test_strscpy);
MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
MODULE_LICENSE("GPL");
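For orientation, the contract these cases exercise, restated as a minimal illustrative sketch (not part of the patch; it only paraphrases the documented strscpy_pad() semantics):

	char dst[4];

	strscpy_pad(dst, "a", sizeof(dst));    /* returns 1;      dst = "a\0\0\0" (tail zero-padded) */
	strscpy_pad(dst, "abc", sizeof(dst));  /* returns 3;      dst = "abc\0"   (no tail to pad)   */
	strscpy_pad(dst, "abcd", sizeof(dst)); /* returns -E2BIG; dst = "abc\0"   (truncated, terminated) */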
@@ -47,6 +47,9 @@ struct test_sysctl_data {
	unsigned int uint_0001;

	char string_0001[65];

#define SYSCTL_TEST_BITMAP_SIZE	65536
	unsigned long *bitmap_0001;
};

static struct test_sysctl_data test_data = {
@@ -102,6 +105,13 @@ static struct ctl_table test_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "bitmap_0001",
		.data		= &test_data.bitmap_0001,
		.maxlen		= SYSCTL_TEST_BITMAP_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_do_large_bitmap,
	},
	{ }
};

@@ -129,15 +139,21 @@ static struct ctl_table_header *test_sysctl_header;

static int __init test_sysctl_init(void)
{
	test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
	if (!test_sysctl_header)
	test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
	if (!test_data.bitmap_0001)
		return -ENOMEM;
	test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
	if (!test_sysctl_header) {
		kfree(test_data.bitmap_0001);
		return -ENOMEM;
	}
	return 0;
}
late_initcall(test_sysctl_init);

static void __exit test_sysctl_exit(void)
{
	kfree(test_data.bitmap_0001);
	if (test_sysctl_header)
		unregister_sysctl_table(test_sysctl_header);
}
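The reordered init above allocates the bitmap first and frees it if table registration fails. For comparison, the same flow in the kernel's usual goto-unwind shape (illustrative only, not part of the patch):

static int __init example_init(void)
{
	test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE / 8, GFP_KERNEL);
	if (!test_data.bitmap_0001)
		return -ENOMEM;

	test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
	if (!test_sysctl_header)
		goto err_free_bitmap;

	return 0;

err_free_bitmap:
	kfree(test_data.bitmap_0001);
	return -ENOMEM;
}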
@@ -384,12 +384,11 @@ static int test_func(void *private)
{
	struct test_driver *t = private;
	int random_array[ARRAY_SIZE(test_case_array)];
	int index, i, j, ret;
	int index, i, j;
	ktime_t kt;
	u64 delta;

	ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
	if (ret < 0)
	if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
		pr_err("Failed to set affinity to %d CPU\n", t->cpu);

	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
@@ -415,8 +414,7 @@ static int test_func(void *private)

	kt = ktime_get();
	for (j = 0; j < test_repeat_count; j++) {
		ret = test_case_array[index].test_func();
		if (!ret)
		if (!test_case_array[index].test_func())
			per_cpu_test_data[t->cpu][index].test_passed++;
		else
			per_cpu_test_data[t->cpu][index].test_failed++;
428
lib/vsprintf.c
@@ -593,15 +593,13 @@ char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
	return buf;
}

static noinline_for_stack
char *string(char *buf, char *end, const char *s, struct printf_spec spec)
/* Handle string from a well known address. */
static char *string_nocheck(char *buf, char *end, const char *s,
			    struct printf_spec spec)
{
	int len = 0;
	size_t lim = spec.precision;

	if ((unsigned long)s < PAGE_SIZE)
		s = "(null)";

	while (lim--) {
		char c = *s++;
		if (!c)
@@ -614,9 +612,64 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
	return widen_string(buf, len, end, spec);
}
/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
			  struct printf_spec spec)
{
	/*
	 * Hard limit to avoid completely insane messages. It actually
	 * works pretty well because most error messages are in
	 * the many pointer format modifiers.
	 */
	if (spec.precision == -1)
		spec.precision = 2 * sizeof(void *);

	return string_nocheck(buf, end, s, spec);
}

/*
 * Do not call any complex external code here. Nested printk()/vsprintf()
 * might cause infinite loops. Failures might break printk() and would
 * be hard to debug.
 */
static const char *check_pointer_msg(const void *ptr)
{
	if (!ptr)
		return "(null)";

	if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
		return "(efault)";

	return NULL;
}

static int check_pointer(char **buf, char *end, const void *ptr,
			 struct printf_spec spec)
{
	const char *err_msg;

	err_msg = check_pointer_msg(ptr);
	if (err_msg) {
		*buf = error_string(*buf, end, err_msg, spec);
		return -EFAULT;
	}

	return 0;
}
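Every converted %p handler below follows the same shape; a minimal sketch with a made-up handler (example_string, struct foo and f->name are hypothetical):

static char *example_string(char *buf, char *end, const struct foo *f,
			    struct printf_spec spec)
{
	/* For a NULL or IS_ERR()-style argument, check_pointer() has
	 * already emitted "(null)" or "(efault)" and advanced buf. */
	if (check_pointer(&buf, end, f, spec))
		return buf;

	return string_nocheck(buf, end, f->name, spec);
}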
static noinline_for_stack
char *pointer_string(char *buf, char *end, const void *ptr,
		     struct printf_spec spec)
char *string(char *buf, char *end, const char *s,
	     struct printf_spec spec)
{
	if (check_pointer(&buf, end, s, spec))
		return buf;

	return string_nocheck(buf, end, s, spec);
}

static char *pointer_string(char *buf, char *end,
			    const void *ptr,
			    struct printf_spec spec)
{
	spec.base = 16;
	spec.flags |= SMALL;
@@ -701,7 +754,7 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
	if (static_branch_unlikely(&not_filled_random_ptr_key)) {
		spec.field_width = 2 * sizeof(ptr);
		/* string length must be less than default_width */
		return string(buf, end, str, spec);
		return error_string(buf, end, str, spec);
	}

#ifdef CONFIG_64BIT
@@ -717,6 +770,55 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
	return pointer_string(buf, end, (const void *)hashval, spec);
}
int kptr_restrict __read_mostly;

static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
			 struct printf_spec spec)
{
	switch (kptr_restrict) {
	case 0:
		/* Handle as %p, hash and do _not_ leak addresses. */
		return ptr_to_id(buf, end, ptr, spec);
	case 1: {
		const struct cred *cred;

		/*
		 * kptr_restrict==1 cannot be used in IRQ context
		 * because its test for CAP_SYSLOG would be meaningless.
		 */
		if (in_irq() || in_serving_softirq() || in_nmi()) {
			if (spec.field_width == -1)
				spec.field_width = 2 * sizeof(ptr);
			return error_string(buf, end, "pK-error", spec);
		}

		/*
		 * Only print the real pointer value if the current
		 * process has CAP_SYSLOG and is running with the
		 * same credentials it started with. This is because
		 * access to files is checked at open() time, but %pK
		 * checks permission at read() time. We don't want to
		 * leak pointer values if a binary opens a file using
		 * %pK and then elevates privileges before reading it.
		 */
		cred = current_cred();
		if (!has_capability_noaudit(current, CAP_SYSLOG) ||
		    !uid_eq(cred->euid, cred->uid) ||
		    !gid_eq(cred->egid, cred->gid))
			ptr = NULL;
		break;
	}
	case 2:
	default:
		/* Always print 0's for %pK */
		ptr = NULL;
		break;
	}

	return pointer_string(buf, end, ptr, spec);
}
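For reference, what a %pK user observes under each policy in the switch above (illustrative; the message and the ctx pointer are made up):

	/*
	 * kptr_restrict == 0: hashed, same policy as plain %p
	 * kptr_restrict == 1: real address, but only for CAP_SYSLOG readers
	 *                     whose credentials are unchanged since exec
	 * kptr_restrict == 2: always zeros
	 */
	pr_info("ctx at %pK\n", ctx);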
static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
		  const char *fmt)
@@ -736,6 +838,11 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp

	rcu_read_lock();
	for (i = 0; i < depth; i++, d = p) {
		if (check_pointer(&buf, end, d, spec)) {
			rcu_read_unlock();
			return buf;
		}

		p = READ_ONCE(d->d_parent);
		array[i] = READ_ONCE(d->d_name.name);
		if (p == d) {
@@ -766,8 +873,12 @@ static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
		struct printf_spec spec, const char *fmt)
{
	struct gendisk *hd = bdev->bd_disk;

	struct gendisk *hd;

	if (check_pointer(&buf, end, bdev, spec))
		return buf;

	hd = bdev->bd_disk;
	buf = string(buf, end, hd->disk_name, spec);
	if (bdev->bd_part->partno) {
		if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
@@ -802,7 +913,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
	else
		sprint_symbol_no_offset(sym, value);

	return string(buf, end, sym, spec);
	return string_nocheck(buf, end, sym, spec);
#else
	return special_hex_number(buf, end, value, sizeof(void *));
#endif
@@ -886,29 +997,32 @@ char *resource_string(char *buf, char *end, struct resource *res,
	int decode = (fmt[0] == 'R') ? 1 : 0;
	const struct printf_spec *specp;

	if (check_pointer(&buf, end, res, spec))
		return buf;

	*p++ = '[';
	if (res->flags & IORESOURCE_IO) {
		p = string(p, pend, "io ", str_spec);
		p = string_nocheck(p, pend, "io ", str_spec);
		specp = &io_spec;
	} else if (res->flags & IORESOURCE_MEM) {
		p = string(p, pend, "mem ", str_spec);
		p = string_nocheck(p, pend, "mem ", str_spec);
		specp = &mem_spec;
	} else if (res->flags & IORESOURCE_IRQ) {
		p = string(p, pend, "irq ", str_spec);
		p = string_nocheck(p, pend, "irq ", str_spec);
		specp = &default_dec_spec;
	} else if (res->flags & IORESOURCE_DMA) {
		p = string(p, pend, "dma ", str_spec);
		p = string_nocheck(p, pend, "dma ", str_spec);
		specp = &default_dec_spec;
	} else if (res->flags & IORESOURCE_BUS) {
		p = string(p, pend, "bus ", str_spec);
		p = string_nocheck(p, pend, "bus ", str_spec);
		specp = &bus_spec;
	} else {
		p = string(p, pend, "??? ", str_spec);
		p = string_nocheck(p, pend, "??? ", str_spec);
		specp = &mem_spec;
		decode = 0;
	}
	if (decode && res->flags & IORESOURCE_UNSET) {
		p = string(p, pend, "size ", str_spec);
		p = string_nocheck(p, pend, "size ", str_spec);
		p = number(p, pend, resource_size(res), *specp);
	} else {
		p = number(p, pend, res->start, *specp);
@@ -919,21 +1033,21 @@ char *resource_string(char *buf, char *end, struct resource *res,
	}
	if (decode) {
		if (res->flags & IORESOURCE_MEM_64)
			p = string(p, pend, " 64bit", str_spec);
			p = string_nocheck(p, pend, " 64bit", str_spec);
		if (res->flags & IORESOURCE_PREFETCH)
			p = string(p, pend, " pref", str_spec);
			p = string_nocheck(p, pend, " pref", str_spec);
		if (res->flags & IORESOURCE_WINDOW)
			p = string(p, pend, " window", str_spec);
			p = string_nocheck(p, pend, " window", str_spec);
		if (res->flags & IORESOURCE_DISABLED)
			p = string(p, pend, " disabled", str_spec);
			p = string_nocheck(p, pend, " disabled", str_spec);
	} else {
		p = string(p, pend, " flags ", str_spec);
		p = string_nocheck(p, pend, " flags ", str_spec);
		p = number(p, pend, res->flags, default_flag_spec);
	}
	*p++ = ']';
	*p = '\0';

	return string(buf, end, sym, spec);
	return string_nocheck(buf, end, sym, spec);
}
static noinline_for_stack
@@ -948,9 +1062,8 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
		/* nothing to print */
		return buf;

	if (ZERO_OR_NULL_PTR(addr))
		/* NULL pointer */
		return string(buf, end, NULL, spec);
	if (check_pointer(&buf, end, addr, spec))
		return buf;

	switch (fmt[1]) {
	case 'C':
@@ -997,6 +1110,9 @@ char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
	int i, chunksz;
	bool first = true;

	if (check_pointer(&buf, end, bitmap, spec))
		return buf;

	/* reused to print numbers */
	spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
@@ -1038,6 +1154,9 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
	int cur, rbot, rtop;
	bool first = true;

	if (check_pointer(&buf, end, bitmap, spec))
		return buf;

	rbot = cur = find_first_bit(bitmap, nr_bits);
	while (cur < nr_bits) {
		rtop = cur;
@@ -1076,6 +1195,9 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
	char separator;
	bool reversed = false;

	if (check_pointer(&buf, end, addr, spec))
		return buf;

	switch (fmt[1]) {
	case 'F':
		separator = '-';
@@ -1101,7 +1223,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
	}
	*p = '\0';

	return string(buf, end, mac_addr, spec);
	return string_nocheck(buf, end, mac_addr, spec);
}
static noinline_for_stack
@@ -1264,7 +1386,7 @@ char *ip6_addr_string(char *buf, char *end, const u8 *addr,
	else
		ip6_string(ip6_addr, addr, fmt);

	return string(buf, end, ip6_addr, spec);
	return string_nocheck(buf, end, ip6_addr, spec);
}

static noinline_for_stack
@@ -1275,7 +1397,7 @@ char *ip4_addr_string(char *buf, char *end, const u8 *addr,

	ip4_string(ip4_addr, addr, fmt);

	return string(buf, end, ip4_addr, spec);
	return string_nocheck(buf, end, ip4_addr, spec);
}

static noinline_for_stack
@@ -1337,7 +1459,7 @@ char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
	}
	*p = '\0';

	return string(buf, end, ip6_addr, spec);
	return string_nocheck(buf, end, ip6_addr, spec);
}

static noinline_for_stack
@@ -1372,7 +1494,42 @@ char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
	}
	*p = '\0';

	return string(buf, end, ip4_addr, spec);
	return string_nocheck(buf, end, ip4_addr, spec);
}
static noinline_for_stack
char *ip_addr_string(char *buf, char *end, const void *ptr,
		     struct printf_spec spec, const char *fmt)
{
	char *err_fmt_msg;

	if (check_pointer(&buf, end, ptr, spec))
		return buf;

	switch (fmt[1]) {
	case '6':
		return ip6_addr_string(buf, end, ptr, spec, fmt);
	case '4':
		return ip4_addr_string(buf, end, ptr, spec, fmt);
	case 'S': {
		const union {
			struct sockaddr raw;
			struct sockaddr_in v4;
			struct sockaddr_in6 v6;
		} *sa = ptr;

		switch (sa->raw.sa_family) {
		case AF_INET:
			return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
		case AF_INET6:
			return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
		default:
			return error_string(buf, end, "(einval)", spec);
		}}
	}

	err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";
	return error_string(buf, end, err_fmt_msg, spec);
}
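The new dispatcher consolidates the %pI4/%pI6/%pIS handling that previously lived inline in pointer(); typical call sites look like this (illustrative sketch; addr4 and ss are made-up variables):

	struct in_addr addr4 = { .s_addr = cpu_to_be32(0x7f000001) };

	pr_debug("peer %pI4\n", &addr4);	/* "127.0.0.1" */
	pr_debug("peer %pIS\n", &ss);		/* struct sockaddr_storage ss, dispatched on sa_family */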
static noinline_for_stack
@@ -1387,9 +1544,8 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
	if (spec.field_width == 0)
		return buf;	/* nothing to print */

	if (ZERO_OR_NULL_PTR(addr))
		return string(buf, end, NULL, spec);	/* NULL pointer */

	if (check_pointer(&buf, end, addr, spec))
		return buf;

	do {
		switch (fmt[count++]) {
@@ -1435,6 +1591,21 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
	return buf;
}
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
		       struct printf_spec spec, const char *fmt)
{
	va_list va;

	if (check_pointer(&buf, end, va_fmt, spec))
		return buf;

	va_copy(va, *va_fmt->va);
	buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va);
	va_end(va);

	return buf;
}
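va_format() now backs the long-standing %pV specifier with the same pointer check. The usual forwarding idiom at a call site, for context (sketch, assuming fmt and args come from the caller):

	struct va_format vaf = {
		.fmt = fmt,
		.va = &args,
	};

	printk(KERN_ERR "mydrv: %pV\n", &vaf);	/* expands the caller's fmt/args in place */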
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
		  struct printf_spec spec, const char *fmt)
@@ -1445,6 +1616,9 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
	const u8 *index = uuid_index;
	bool uc = false;

	if (check_pointer(&buf, end, addr, spec))
		return buf;

	switch (*(++fmt)) {
	case 'L':
		uc = true;	/* fall-through */
@@ -1473,56 +1647,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,

	*p = 0;

	return string(buf, end, uuid, spec);
}

int kptr_restrict __read_mostly;

static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
			 struct printf_spec spec)
{
	switch (kptr_restrict) {
	case 0:
		/* Always print %pK values */
		break;
	case 1: {
		const struct cred *cred;

		/*
		 * kptr_restrict==1 cannot be used in IRQ context
		 * because its test for CAP_SYSLOG would be meaningless.
		 */
		if (in_irq() || in_serving_softirq() || in_nmi()) {
			if (spec.field_width == -1)
				spec.field_width = 2 * sizeof(ptr);
			return string(buf, end, "pK-error", spec);
		}

		/*
		 * Only print the real pointer value if the current
		 * process has CAP_SYSLOG and is running with the
		 * same credentials it started with. This is because
		 * access to files is checked at open() time, but %pK
		 * checks permission at read() time. We don't want to
		 * leak pointer values if a binary opens a file using
		 * %pK and then elevates privileges before reading it.
		 */
		cred = current_cred();
		if (!has_capability_noaudit(current, CAP_SYSLOG) ||
		    !uid_eq(cred->euid, cred->uid) ||
		    !gid_eq(cred->egid, cred->gid))
			ptr = NULL;
		break;
	}
	case 2:
	default:
		/* Always print 0's for %pK */
		ptr = NULL;
		break;
	}

	return pointer_string(buf, end, ptr, spec);
	return string_nocheck(buf, end, uuid, spec);
}
static noinline_for_stack
@@ -1532,24 +1657,31 @@ char *netdev_bits(char *buf, char *end, const void *addr,
	unsigned long long num;
	int size;

	if (check_pointer(&buf, end, addr, spec))
		return buf;

	switch (fmt[1]) {
	case 'F':
		num = *(const netdev_features_t *)addr;
		size = sizeof(netdev_features_t);
		break;
	default:
		return ptr_to_id(buf, end, addr, spec);
		return error_string(buf, end, "(%pN?)", spec);
	}

	return special_hex_number(buf, end, num, size);
}
static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr, const char *fmt)
char *address_val(char *buf, char *end, const void *addr,
		  struct printf_spec spec, const char *fmt)
{
	unsigned long long num;
	int size;

	if (check_pointer(&buf, end, addr, spec))
		return buf;

	switch (fmt[1]) {
	case 'd':
		num = *(const dma_addr_t *)addr;
@@ -1601,12 +1733,16 @@ char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
}
static noinline_for_stack
char *rtc_str(char *buf, char *end, const struct rtc_time *tm, const char *fmt)
char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
	      struct printf_spec spec, const char *fmt)
{
	bool have_t = true, have_d = true;
	bool raw = false;
	int count = 2;

	if (check_pointer(&buf, end, tm, spec))
		return buf;

	switch (fmt[count]) {
	case 'd':
		have_t = false;
@@ -1640,9 +1776,9 @@ char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
{
	switch (fmt[1]) {
	case 'R':
		return rtc_str(buf, end, (const struct rtc_time *)ptr, fmt);
		return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
	default:
		return ptr_to_id(buf, end, ptr, spec);
		return error_string(buf, end, "(%ptR?)", spec);
	}
}
@@ -1650,8 +1786,11 @@ static noinline_for_stack
char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
	    const char *fmt)
{
	if (!IS_ENABLED(CONFIG_HAVE_CLK) || !clk)
		return string(buf, end, NULL, spec);
	if (!IS_ENABLED(CONFIG_HAVE_CLK))
		return error_string(buf, end, "(%pC?)", spec);

	if (check_pointer(&buf, end, clk, spec))
		return buf;

	switch (fmt[1]) {
	case 'n':
@@ -1659,7 +1798,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
		return string(buf, end, __clk_get_name(clk), spec);
#else
		return ptr_to_id(buf, end, clk, spec);
		return error_string(buf, end, "(%pC?)", spec);
#endif
	}
}
@@ -1692,11 +1831,15 @@ char *format_flags(char *buf, char *end, unsigned long flags,
}

static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
char *flags_string(char *buf, char *end, void *flags_ptr,
		   struct printf_spec spec, const char *fmt)
{
	unsigned long flags;
	const struct trace_print_flags *names;

	if (check_pointer(&buf, end, flags_ptr, spec))
		return buf;

	switch (fmt[1]) {
	case 'p':
		flags = *(unsigned long *)flags_ptr;
@@ -1713,8 +1856,7 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
		names = gfpflag_names;
		break;
	default:
		WARN_ONCE(1, "Unsupported flags modifier: %c\n", fmt[1]);
		return buf;
		return error_string(buf, end, "(%pG?)", spec);
	}

	return format_flags(buf, end, flags, names);
@@ -1736,13 +1878,13 @@ char *device_node_gen_full_name(const struct device_node *np, char *buf, char *e

	/* special case for root node */
	if (!parent)
		return string(buf, end, "/", default_str_spec);
		return string_nocheck(buf, end, "/", default_str_spec);

	for (depth = 0; parent->parent; depth++)
		parent = parent->parent;

	for ( ; depth >= 0; depth--) {
		buf = string(buf, end, "/", default_str_spec);
		buf = string_nocheck(buf, end, "/", default_str_spec);
		buf = string(buf, end, device_node_name_for_depth(np, depth),
			     default_str_spec);
	}
@@ -1770,10 +1912,10 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
	str_spec.field_width = -1;

	if (!IS_ENABLED(CONFIG_OF))
		return string(buf, end, "(!OF)", spec);
		return error_string(buf, end, "(%pOF?)", spec);

	if ((unsigned long)dn < PAGE_SIZE)
		return string(buf, end, "(null)", spec);
	if (check_pointer(&buf, end, dn, spec))
		return buf;

	/* simple case without anything any more format specifiers */
	fmt++;
@@ -1814,7 +1956,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
		tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
		tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
		tbuf[4] = 0;
		buf = string(buf, end, tbuf, str_spec);
		buf = string_nocheck(buf, end, tbuf, str_spec);
		break;
	case 'c':	/* major compatible string */
		ret = of_property_read_string(dn, "compatible", &p);
@@ -1825,10 +1967,10 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
		has_mult = false;
		of_property_for_each_string(dn, "compatible", prop, p) {
			if (has_mult)
				buf = string(buf, end, ",", str_spec);
			buf = string(buf, end, "\"", str_spec);
				buf = string_nocheck(buf, end, ",", str_spec);
			buf = string_nocheck(buf, end, "\"", str_spec);
			buf = string(buf, end, p, str_spec);
			buf = string(buf, end, "\"", str_spec);
			buf = string_nocheck(buf, end, "\"", str_spec);

			has_mult = true;
		}
@@ -1841,6 +1983,17 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
	return widen_string(buf, buf - buf_start, end, spec);
}

static char *kobject_string(char *buf, char *end, void *ptr,
			    struct printf_spec spec, const char *fmt)
{
	switch (fmt[1]) {
	case 'F':
		return device_node_string(buf, end, ptr, spec, fmt + 1);
	}

	return error_string(buf, end, "(%pO?)", spec);
}
/*
 * Show a '%p' thing. A kernel extension is that the '%p' is followed
 * by an extra set of alphanumeric characters that are extended format
@@ -1957,18 +2110,6 @@ static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
	      struct printf_spec spec)
{
	const int default_width = 2 * sizeof(void *);

	if (!ptr && *fmt != 'K' && *fmt != 'x') {
		/*
		 * Print (null) with the same width as a pointer so it makes
		 * tabular output look nice.
		 */
		if (spec.field_width == -1)
			spec.field_width = default_width;
		return string(buf, end, "(null)", spec);
	}

	switch (*fmt) {
	case 'F':
	case 'f':
@@ -2004,50 +2145,19 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
	 *            4:   001.002.003.004
	 *            6:   000102...0f
	 */
	switch (fmt[1]) {
	case '6':
		return ip6_addr_string(buf, end, ptr, spec, fmt);
	case '4':
		return ip4_addr_string(buf, end, ptr, spec, fmt);
	case 'S': {
		const union {
			struct sockaddr raw;
			struct sockaddr_in v4;
			struct sockaddr_in6 v6;
		} *sa = ptr;

		switch (sa->raw.sa_family) {
		case AF_INET:
			return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
		case AF_INET6:
			return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
		default:
			return string(buf, end, "(invalid address)", spec);
		}}
	}
	break;
		return ip_addr_string(buf, end, ptr, spec, fmt);
	case 'E':
		return escaped_string(buf, end, ptr, spec, fmt);
	case 'U':
		return uuid_string(buf, end, ptr, spec, fmt);
	case 'V':
	{
		va_list va;

		va_copy(va, *((struct va_format *)ptr)->va);
		buf += vsnprintf(buf, end > buf ? end - buf : 0,
				 ((struct va_format *)ptr)->fmt, va);
		va_end(va);
		return buf;
	}
		return va_format(buf, end, ptr, spec, fmt);
	case 'K':
		if (!kptr_restrict)
			break;
		return restricted_pointer(buf, end, ptr, spec);
	case 'N':
		return netdev_bits(buf, end, ptr, spec, fmt);
	case 'a':
		return address_val(buf, end, ptr, fmt);
		return address_val(buf, end, ptr, spec, fmt);
	case 'd':
		return dentry_name(buf, end, ptr, spec, fmt);
	case 't':
@@ -2064,13 +2174,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
#endif

	case 'G':
		return flags_string(buf, end, ptr, fmt);
		return flags_string(buf, end, ptr, spec, fmt);
	case 'O':
		switch (fmt[1]) {
		case 'F':
			return device_node_string(buf, end, ptr, spec, fmt + 1);
		}
		break;
		return kobject_string(buf, end, ptr, spec, fmt);
	case 'x':
		return pointer_string(buf, end, ptr, spec);
	}
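Taken together, these checks mean a bad argument to any %p extension now degrades into a diagnostic token instead of a crash. Illustrative outcomes, following the check_pointer_msg() rules above:

	pr_debug("%pi4\n", NULL);		/* prints "(null)" */
	pr_debug("%pR\n", ERR_PTR(-ENODEV));	/* prints "(efault)" */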
@@ -2685,11 +2791,13 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)

	case FORMAT_TYPE_STR: {
		const char *save_str = va_arg(args, char *);
		const char *err_msg;
		size_t len;

		if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
				|| (unsigned long)save_str < PAGE_SIZE)
			save_str = "(null)";
		err_msg = check_pointer_msg(save_str);
		if (err_msg)
			save_str = err_msg;

		len = strlen(save_str) + 1;
		if (str + len < end)
			memcpy(str, save_str, len);
@@ -259,10 +259,15 @@ ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, s
		bitD->bitContainer = *(const BYTE *)(bitD->start);
		switch (srcSize) {
		case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
			/* fall through */
		case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
			/* fall through */
		case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
			/* fall through */
		case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
			/* fall through */
		case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
			/* fall through */
		case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
		default:;
		}
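The zstd hunks in this merge only add annotations: GCC's -Wimplicit-fallthrough accepts a /* fall through */ comment as an explicit statement of intent, so deliberate cascades like the byte-loading switch above stop warning. The bare pattern, as a minimal sketch:

	switch (remaining) {
	case 2:
		dst[1] = src[1];
		/* fall through */
	case 1:
		dst[0] = src[0];
		break;
	default:
		break;
	}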
@@ -3182,6 +3182,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *
			zcs->outBuffFlushedSize = 0;
			zcs->stage = zcss_flush; /* pass-through to flush stage */
		}
		/* fall through */

	case zcss_flush: {
		size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
@@ -1768,6 +1768,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, c
		return 0;
	}
	dctx->expected = 0; /* not necessary to copy more */
	/* fall through */

	case ZSTDds_decodeFrameHeader:
		memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
@@ -2375,7 +2376,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
		}
		zds->stage = zdss_read;
	}
	/* pass-through */
	/* fall through */

	case zdss_read: {
		size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
@@ -2404,6 +2405,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
		zds->stage = zdss_load;
		/* pass-through */
	}
	/* fall through */

	case zdss_load: {
		size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
@@ -2436,6 +2438,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
		/* pass-through */
	}
	}
	/* fall through */

	case zdss_flush: {
		size_t const toFlushSize = zds->outEnd - zds->outStart;
@@ -556,7 +556,9 @@ size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, si
	n = srcSize & ~3; /* join to mod 4 */
	switch (srcSize & 3) {
	case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
		/* fall through */
	case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
		/* fall through */
	case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
	case 0:
	default:;