Merge commit 'v3.17' into next

Author: James Morris
Date:   2014-11-19 21:32:12 +11:00

10177 changed files with 437869 additions and 456249 deletions

lib/Kconfig

@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
config ARCH_HAS_FAST_MULTIPLIER
bool
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -396,6 +399,39 @@ config CPU_RMAP
config DQL
bool
config GLOB
bool
# This actually supports modular compilation, but the module overhead
# is ridiculous for the amount of code involved. Until an out-of-tree
# driver asks for it, we'll just link it directly into the kernel
# when required. Since we're ignoring out-of-tree users, there's also
# no need to bother prompting for a manual decision:
# prompt "glob_match() function"
help
This option provides a glob_match function for performing
simple text pattern matching. It originated in the ATA code
to blacklist particular drive models, but other device drivers
may need similar functionality.
All drivers in the Linux kernel tree that require this function
should automatically select this option. Say N unless you
are compiling an out-of-tree driver which tells you that it
depends on this.
config GLOB_SELFTEST
bool "glob self-test on init"
default n
depends on GLOB
help
This option enables a simple self-test of the glob_match
function on startup. It is primarily useful for people
working on the code to ensure they haven't introduced any
regressions.
It only adds a little bit of code and slows kernel boot (or
module load) by a small amount, so you're welcome to play with
it, but you probably don't need it.
#
# Netlink attribute parsing support is select'ed if needed
#
@@ -475,4 +511,11 @@ config UCS2_STRING
source "lib/fonts/Kconfig"
#
# sg chaining option
#
config ARCH_HAS_SG_CHAIN
def_bool n
endmenu

lib/Kconfig.debug

@@ -15,7 +15,7 @@ config PRINTK_TIME
The behavior is also controlled by the kernel command line
parameter printk.time=1. See Documentation/kernel-parameters.txt
config DEFAULT_MESSAGE_LOGLEVEL
config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
default "4"
@@ -143,6 +143,30 @@ config DEBUG_INFO_REDUCED
DEBUG_INFO build and compile times are reduced too.
Only works with newer gcc versions.
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
because it stores the information only once on disk in .dwo
files instead of multiple times in object files and executables.
In addition the debug information is also compressed.
Requires recent gcc (4.7+) and recent gdb/binutils.
Any tool that packages or reads debug information would need
to know about the .dwo files and include them.
Incompatible with older versions of ccache.
config DEBUG_INFO_DWARF4
bool "Generate dwarf4 debuginfo"
depends on DEBUG_INFO
help
Generate dwarf4 debug info. This requires recent versions
of gcc and gdb. It makes the debug information larger.
But it significantly improves the success of resolving
variables in gdb on optimized code.
config ENABLE_WARN_DEPRECATED
bool "Enable __deprecated logic"
default y
@@ -835,7 +859,7 @@ config DEBUG_RT_MUTEXES
config RT_MUTEX_TESTER
bool "Built-in scriptable tester for rt-mutexes"
depends on DEBUG_KERNEL && RT_MUTEXES
depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
help
This option enables a rt-mutex tester.
@@ -868,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
will test all possible w/w mutex interface abuse with the
exception of simply not acquiring all the required locks.
Note that this feature can introduce significant overhead, so
it really should not be enabled in a production or distro kernel,
even a debug kernel. If you are a driver writer, enable it. If
you are a distro, do not.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1008,8 +1036,13 @@ config TRACE_IRQFLAGS
either tracing or lock debugging.
config STACKTRACE
bool
bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
help
This option causes the kernel to create a /proc/pid/stack for
every process, showing its current stack trace.
It is also used by various kernel debugging features that require
stack trace generation.
config DEBUG_KOBJECT
bool "kobject debugging"
@@ -1131,20 +1164,6 @@ config PROVE_RCU_REPEATEDLY
Say N if you are unsure.
config PROVE_RCU_DELAY
bool "RCU debugging: preemptible RCU race provocation"
depends on DEBUG_KERNEL && PREEMPT_RCU
default n
help
There is a class of races that involve an unlikely preemption
of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
been set to INT_MIN. This feature inserts a delay at that
point to increase the probability of these races.
Say Y to increase probability of preemption of __rcu_read_unlock().
Say N if you are unsure.
config SPARSE_RCU_POINTER
bool "RCU debugging: sparse-based checks for pointer usage"
default n
@@ -1550,6 +1569,14 @@ config TEST_STRING_HELPERS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
config TEST_RHASHTABLE
bool "Perform selftest on resizable hash table"
default n
help
Enable this option to test the rhashtable functions at boot.
If unsure, say N.
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT
@@ -1649,6 +1676,28 @@ config TEST_BPF
If unsure, say N.
config TEST_FIRMWARE
tristate "Test firmware loading via userspace interface"
default n
depends on FW_LOADER
help
This builds the "test_firmware" module that creates a userspace
interface for testing firmware loading. This can be used to
control the triggering of firmware loading without needing an
actual firmware-using device. The contents can be rechecked by
userspace.
If unsure, say N.
config TEST_UDELAY
tristate "udelay test driver"
default n
help
This builds the "udelay_test" module that helps to make sure
that udelay() is working properly.
If unsure, say N.
source "samples/Kconfig"
source "lib/Kconfig.kgdb"

lib/Makefile

@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o hash.o
percpu-refcount.o percpu_ida.o hash.o rhashtable.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_MODULE) += test_module.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -136,6 +137,8 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_SIGNATURE) += digsig.o

lib/assoc_array.c

@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
if (!cursor)
goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
BUG_ON(!ptr);
BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:

lib/bitmap.c

@@ -40,9 +40,9 @@
* for the best explanations of this ordering.
*/
int __bitmap_empty(const unsigned long *bitmap, int bits)
int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
{
int k, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
@@ -55,9 +55,9 @@ int __bitmap_empty(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_empty);
int __bitmap_full(const unsigned long *bitmap, int bits)
int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
{
int k, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
@@ -71,9 +71,9 @@ int __bitmap_full(const unsigned long *bitmap, int bits)
EXPORT_SYMBOL(__bitmap_full);
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
@@ -86,14 +86,14 @@ int __bitmap_equal(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
int k, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
@@ -182,23 +182,26 @@ void __bitmap_shift_left(unsigned long *dst,
EXPORT_SYMBOL(__bitmap_shift_left);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
for (k = 0; k < nr; k++)
for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
if (bits % BITS_PER_LONG)
result |= (dst[k] = bitmap1[k] & bitmap2[k] &
BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned int k;
unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
@@ -206,10 +209,10 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned int k;
unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
@@ -217,22 +220,25 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_xor);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
for (k = 0; k < nr; k++)
for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
if (bits % BITS_PER_LONG)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
@@ -245,9 +251,9 @@ int __bitmap_intersects(const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_intersects);
int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
const unsigned long *bitmap2, unsigned int bits)
{
int k, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return 0;
@@ -259,9 +265,10 @@ int __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);
int __bitmap_weight(const unsigned long *bitmap, int bits)
int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
int k, w = 0, lim = bits/BITS_PER_LONG;
unsigned int k, lim = bits/BITS_PER_LONG;
int w = 0;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@@ -273,42 +280,42 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
void bitmap_set(unsigned long *map, int start, int nr)
void bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
const unsigned int size = start + len;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
while (len - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
if (len) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
void bitmap_clear(unsigned long *map, int start, int nr)
void bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
const unsigned int size = start + len;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
while (len - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
if (len) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
@@ -664,13 +671,8 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
char *nl = strchr(bp, '\n');
int len;
if (nl)
len = nl - bp;
else
len = strlen(bp);
char *nl = strchrnul(bp, '\n');
int len = nl - bp;
return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
}
@@ -716,7 +718,7 @@ EXPORT_SYMBOL(bitmap_parselist_user);
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
* and other @pos values will get mapped to 0. When @pos value 7
* and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
@@ -1046,7 +1048,7 @@ enum {
REG_OP_RELEASE, /* clear all bits in region */
};
static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
@@ -1112,11 +1114,11 @@ done:
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
int pos, end; /* scans bitmap by regions of size order */
unsigned int pos, end; /* scans bitmap by regions of size order */
for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
continue;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
@@ -1137,7 +1139,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
*
* No return value.
*/
void bitmap_release_region(unsigned long *bitmap, int pos, int order)
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
@@ -1154,12 +1156,11 @@ EXPORT_SYMBOL(bitmap_release_region);
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return 0;
return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
}
EXPORT_SYMBOL(bitmap_allocate_region);
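
For context, a minimal sketch of how the region API touched above is consumed (the caller is illustrative, not part of this patch; it assumes the bitmap starts out zeroed):

static int demo_region(void)
{
	static DECLARE_BITMAP(map, 64);		/* static scope: starts zeroed */
	int pos;

	pos = bitmap_find_free_region(map, 64, 2);	/* request 1 << 2 = 4 bits */
	if (pos < 0)
		return pos;		/* negative errno: no aligned free run */
	/* bits pos .. pos + 3 are now set */
	bitmap_release_region(map, pos, 2);
	return 0;
}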

lib/cmdline.c

@@ -121,11 +121,7 @@ EXPORT_SYMBOL(get_options);
* @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
* potentially suffixed with %K (for kilobytes, or 1024 bytes),
* %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
* 1073741824). If the number is suffixed with K, M, or G, then
* the return value is the number multiplied by one kilobyte, one
* megabyte, or one gigabyte, respectively.
* potentially suffixed with K, M, G, T, P, E.
*/
unsigned long long memparse(const char *ptr, char **retptr)
@@ -135,6 +131,15 @@ unsigned long long memparse(const char *ptr, char **retptr)
unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
switch (*endptr) {
case 'E':
case 'e':
ret <<= 10;
case 'P':
case 'p':
ret <<= 10;
case 'T':
case 't':
ret <<= 10;
case 'G':
case 'g':
ret <<= 10;
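
Each case deliberately falls through to the next, so every suffix step multiplies the parsed value by a further 1024 (the M and K cases continue below this hunk). A hedged illustration of what callers get back; the calls themselves are examples, not from this patch:

	char *end;
	unsigned long long v;

	v = memparse("2K", &end);	/* 2 << 10 = 2048 */
	v = memparse("16M", &end);	/* 16 << 20 = 16777216 */
	v = memparse("1T", &end);	/* 1 << 40: 'T' falls through G, M and K */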

lib/crc32.c

@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
#define GF2_DIM 32
static u32 gf2_matrix_times(u32 *mat, u32 vec)
{
u32 sum = 0;
while (vec) {
if (vec & 1)
sum ^= *mat;
vec >>= 1;
mat++;
}
return sum;
}
static void gf2_matrix_square(u32 *square, u32 *mat)
{
int i;
for (i = 0; i < GF2_DIM; i++)
square[i] = gf2_matrix_times(mat, mat[i]);
}
#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
/* implements slicing-by-4 or slicing-by-8 algorithm */
static inline u32
static inline u32 __pure
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
{
# ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
}
#endif
/* For conditions of distribution and use, see copyright notice in zlib.h */
static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
u32 polynomial)
{
u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */
u32 row;
int i;
if (len2 <= 0)
return crc1;
/* Put operator for one zero bit in odd */
odd[0] = polynomial;
row = 1;
for (i = 1; i < GF2_DIM; i++) {
odd[i] = row;
row <<= 1;
}
gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
/* Apply len2 zeros to crc1 (first square will put the operator for one
* zero byte, eight zero bits, in even).
*/
do {
/* Apply zeros operator for this bit of len2 */
gf2_matrix_square(even, odd);
if (len2 & 1)
crc1 = gf2_matrix_times(even, crc1);
len2 >>= 1;
/* If no more bits set, then done */
if (len2 == 0)
break;
/* Another iteration of the loop with odd and even swapped */
gf2_matrix_square(odd, even);
if (len2 & 1)
crc1 = gf2_matrix_times(odd, crc1);
len2 >>= 1;
} while (len2 != 0);
crc1 ^= crc2;
return crc1;
}
/**
* crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
* represents the highest power of x, and the msbit represents x^0.
*/
static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
{
return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
u32 product = x & 1 ? y : 0;
int i;
for (i = 0; i < 31; i++) {
product = (product >> 1) ^ (product & 1 ? modulus : 0);
x >>= 1;
product ^= x & 1 ? y : 0;
}
return product;
}
u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
/**
* crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
* @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
* @len: The number of bytes. @crc is multiplied by x^(8*@len)
* @polynomial: The modulus used to reduce the result to 32 bits.
*
* It's possible to parallelize CRC computations by computing a CRC
* over separate ranges of a buffer, then summing them.
* This shifts the given CRC by 8*len bits (i.e. produces the same effect
* as appending len bytes of zero to the data), in time proportional
* to log(len).
*/
static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
u32 polynomial)
{
return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
u32 power = polynomial; /* CRC of x^32 */
int i;
/* Shift up to 32 bits in the simple linear way */
for (i = 0; i < 8 * (int)(len & 3); i++)
crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
len >>= 2;
if (!len)
return crc;
for (;;) {
/* "power" is x^(2^i), modulo the polynomial */
if (len & 1)
crc = gf2_multiply(crc, power, polynomial);
len >>= 1;
if (!len)
break;
/* Square power, advancing to x^(2^(i+1)) */
power = gf2_multiply(power, power, polynomial);
}
return crc;
}
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(crc32_le_combine);
EXPORT_SYMBOL(__crc32c_le);
EXPORT_SYMBOL(__crc32c_le_combine);
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRCPOLY_LE);
}
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
}
EXPORT_SYMBOL(crc32_le_shift);
EXPORT_SYMBOL(__crc32c_le_shift);
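
The payoff of the shift primitive is CRC combination without the old GF(2) matrix machinery: two CRCs computed over adjacent ranges combine with one shift and one XOR. A sketch of the identity (the wrapper name is illustrative; the former crc32_le_combine() can be expressed as a thin wrapper like this):

static inline u32 combine_sketch(u32 crcA, u32 crcB, size_t lenB)
{
	/* crcA covers buffer A, crcB covers buffer B (seeded with 0);
	 * the CRC of A followed by B is then: */
	return crc32_le_shift(crcA, lenB) ^ crcB;
}
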
/**
* crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
#ifdef CONFIG_CRC32_SELFTEST
/* 4096 random bytes */
static u8 __attribute__((__aligned__(8))) test_buf[] =
static u8 const __aligned(8) test_buf[] __initconst =
{
0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
u32 crc_le; /* expected crc32_le result */
u32 crc_be; /* expected crc32_be result */
u32 crc32c_le; /* expected crc32c_le result */
} test[] =
} const test[] __initconst =
{
{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},

lib/decompress.c

@@ -54,7 +54,7 @@ static const struct compress_format compressed_formats[] __initconst = {
{ {0, 0}, NULL, NULL }
};
decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
const char **name)
{
const struct compress_format *cf;

lib/decompress_bunzip2.c

@@ -92,8 +92,8 @@ struct bunzip_data {
/* State for interrupting output loop */
int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
/* I/O tracking data (file handles, buffers, positions, etc.) */
int (*fill)(void*, unsigned int);
int inbufCount, inbufPos /*, outbufPos*/;
long (*fill)(void*, unsigned long);
long inbufCount, inbufPos /*, outbufPos*/;
unsigned char *inbuf /*,*outbuf*/;
unsigned int inbufBitCount, inbufBits;
/* The CRC values stored in the block header and calculated from the
@@ -617,7 +617,7 @@ decode_next_byte:
goto decode_next_byte;
}
static int INIT nofill(void *buf, unsigned int len)
static long INIT nofill(void *buf, unsigned long len)
{
return -1;
}
@@ -625,8 +625,8 @@ static int INIT nofill(void *buf, unsigned int len)
/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain
a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
ignored, and data is read from file handle into temporary buffer. */
static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
int (*fill)(void*, unsigned int))
static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
long (*fill)(void*, unsigned long))
{
struct bunzip_data *bd;
unsigned int i, j, c;
@@ -675,11 +675,11 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
not end of file.) */
STATIC int INIT bunzip2(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
STATIC int INIT bunzip2(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *outbuf,
int *pos,
long *pos,
void(*error)(char *x))
{
struct bunzip_data *bd;
@@ -743,11 +743,11 @@ exit_0:
}
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
STATIC int INIT decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *outbuf,
int *pos,
long *pos,
void(*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
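
The int-to-long widening runs through every decompressor's fill() and flush() callbacks so input sizes past 2GB survive on 64-bit. A minimal sketch of a fill() conforming to the new contract (the function and its input source are hypothetical):

static long my_fill(void *buf, unsigned long len)
{
	long copied = 0;

	/* ... copy up to len bytes from the input stream into buf ... */

	return copied;	/* bytes provided; 0 at end of input, negative on
			 * error (compare the nofill() stub's -1) */
}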

lib/decompress_inflate.c

@@ -27,17 +27,17 @@
#define GZIP_IOBUF_SIZE (16*1024)
static int INIT nofill(void *buffer, unsigned int len)
static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
/* Included from initramfs et al code */
STATIC int INIT gunzip(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
STATIC int INIT gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf,
int *pos,
long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
@@ -142,7 +142,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
/* Write any data generated */
if (flush && strm->next_out > out_buf) {
int l = strm->next_out - out_buf;
long l = strm->next_out - out_buf;
if (l != flush(out_buf, l)) {
rc = -1;
error("write error");

lib/decompress_unlz4.c

@@ -31,10 +31,10 @@
#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
#define ARCHIVE_MAGICNUMBER 0x184C2102
STATIC inline int INIT unlz4(u8 *input, int in_len,
int (*fill) (void *, unsigned int),
int (*flush) (void *, unsigned int),
u8 *output, int *posp,
STATIC inline int INIT unlz4(u8 *input, long in_len,
long (*fill)(void *, unsigned long),
long (*flush)(void *, unsigned long),
u8 *output, long *posp,
void (*error) (char *x))
{
int ret = -1;
@@ -43,7 +43,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
u8 *inp;
u8 *inp_start;
u8 *outp;
int size = in_len;
long size = in_len;
#ifdef PREBOOT
size_t out_len = get_unaligned_le32(input + in_len);
#endif
@@ -83,13 +83,20 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
if (posp)
*posp = 0;
if (fill)
fill(inp, 4);
if (fill) {
size = fill(inp, 4);
if (size < 4) {
error("data corrupted");
goto exit_2;
}
}
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
inp += 4;
size -= 4;
if (!fill) {
inp += 4;
size -= 4;
}
} else {
error("invalid header");
goto exit_2;
@@ -100,29 +107,44 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
for (;;) {
if (fill)
fill(inp, 4);
if (fill) {
size = fill(inp, 4);
if (size == 0)
break;
if (size < 4) {
error("data corrupted");
goto exit_2;
}
}
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
inp += 4;
size -= 4;
if (!fill) {
inp += 4;
size -= 4;
}
if (posp)
*posp += 4;
continue;
}
inp += 4;
size -= 4;
if (posp)
*posp += 4;
if (fill) {
if (!fill) {
inp += 4;
size -= 4;
} else {
if (chunksize > lz4_compressbound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
fill(inp, chunksize);
size = fill(inp, chunksize);
if (size < chunksize) {
error("data corrupted");
goto exit_2;
}
}
#ifdef PREBOOT
if (out_len >= uncomp_chunksize) {
@@ -149,18 +171,17 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
if (posp)
*posp += chunksize;
size -= chunksize;
if (!fill) {
size -= chunksize;
if (size == 0)
break;
else if (size < 0) {
error("data corrupted");
goto exit_2;
if (size == 0)
break;
else if (size < 0) {
error("data corrupted");
goto exit_2;
}
inp += chunksize;
}
inp += chunksize;
if (fill)
inp = inp_start;
}
ret = 0;
@@ -175,11 +196,11 @@ exit_0:
}
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, int in_len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
STATIC int INIT decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *posp,
long *posp,
void(*error)(char *x)
)
{

lib/decompress_unlzma.c

@@ -65,11 +65,11 @@ static long long INIT read_int(unsigned char *ptr, int size)
#define LZMA_IOBUF_SIZE 0x10000
struct rc {
int (*fill)(void*, unsigned int);
long (*fill)(void*, unsigned long);
uint8_t *ptr;
uint8_t *buffer;
uint8_t *buffer_end;
int buffer_size;
long buffer_size;
uint32_t code;
uint32_t range;
uint32_t bound;
@@ -82,7 +82,7 @@ struct rc {
#define RC_MODEL_TOTAL_BITS 11
static int INIT nofill(void *buffer, unsigned int len)
static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
@@ -99,8 +99,8 @@ static void INIT rc_read(struct rc *rc)
/* Called once */
static inline void INIT rc_init(struct rc *rc,
int (*fill)(void*, unsigned int),
char *buffer, int buffer_size)
long (*fill)(void*, unsigned long),
char *buffer, long buffer_size)
{
if (fill)
rc->fill = fill;
@@ -280,7 +280,7 @@ struct writer {
size_t buffer_pos;
int bufsize;
size_t global_pos;
int(*flush)(void*, unsigned int);
long (*flush)(void*, unsigned long);
struct lzma_header *header;
};
@@ -534,11 +534,11 @@ static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *posp,
long *posp,
void(*error)(char *x)
)
{
@@ -667,11 +667,11 @@ exit_0:
}
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, int in_len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
STATIC int INIT decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *posp,
long *posp,
void(*error)(char *x)
)
{

lib/decompress_unlzo.c

@@ -51,7 +51,7 @@ static const unsigned char lzop_magic[] = {
#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4)
#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
{
int l;
u8 *parse = input;
@@ -108,14 +108,14 @@ STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
return 1;
}
STATIC inline int INIT unlzo(u8 *input, int in_len,
int (*fill) (void *, unsigned int),
int (*flush) (void *, unsigned int),
u8 *output, int *posp,
STATIC int INIT unlzo(u8 *input, long in_len,
long (*fill)(void *, unsigned long),
long (*flush)(void *, unsigned long),
u8 *output, long *posp,
void (*error) (char *x))
{
u8 r = 0;
int skip = 0;
long skip = 0;
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;

lib/decompress_unxz.c

@@ -248,10 +248,10 @@ void *memmove(void *dest, const void *src, size_t size)
* both input and output buffers are available as a single chunk, i.e. when
* fill() and flush() won't be used.
*/
STATIC int INIT unxz(unsigned char *in, int in_size,
int (*fill)(void *dest, unsigned int size),
int (*flush)(void *src, unsigned int size),
unsigned char *out, int *in_used,
STATIC int INIT unxz(unsigned char *in, long in_size,
long (*fill)(void *dest, unsigned long size),
long (*flush)(void *src, unsigned long size),
unsigned char *out, long *in_used,
void (*error)(char *x))
{
struct xz_buf b;
@@ -329,7 +329,7 @@ STATIC int INIT unxz(unsigned char *in, int in_size,
* returned by xz_dec_run(), but probably
* it's not too bad.
*/
if (flush(b.out, b.out_pos) != (int)b.out_pos)
if (flush(b.out, b.out_pos) != (long)b.out_pos)
ret = XZ_BUF_ERROR;
b.out_pos = 0;

lib/devres.c

@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
}
EXPORT_SYMBOL(devm_iounmap);
#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
/**
* devm_ioremap_resource() - check, request region, and ioremap resource
* @dev: generic device to handle the resource for
@@ -142,34 +140,6 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
EXPORT_SYMBOL(devm_ioremap_resource);
/**
* devm_request_and_ioremap() - Check, request region, and ioremap resource
* @dev: Generic device to handle the resource for
* @res: resource to be handled
*
* Takes all necessary steps to ioremap a mem resource. Uses managed device, so
* everything is undone on driver detach. Checks arguments, so you can feed
* it the result from e.g. platform_get_resource() directly. Returns the
* remapped pointer or NULL on error. Usage example:
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_request_and_ioremap(&pdev->dev, res);
* if (!base)
* return -EADDRNOTAVAIL;
*/
void __iomem *devm_request_and_ioremap(struct device *dev,
struct resource *res)
{
void __iomem *dest_ptr;
dest_ptr = devm_ioremap_resource(dev, res);
if (IS_ERR(dest_ptr))
return NULL;
return dest_ptr;
}
EXPORT_SYMBOL(devm_request_and_ioremap);
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres

lib/dynamic_debug.c

@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
char buf[PREFIX_SIZE];
res = dev_printk_emit(7, dev->dev.parent,
"%s%s %s %s: %pV",
"%s%s %s %s%s: %pV",
dynamic_emit_prefix(descriptor, buf),
dev_driver_string(dev->dev.parent),
dev_name(dev->dev.parent),
netdev_name(dev), &vaf);
netdev_name(dev), netdev_reg_state(dev),
&vaf);
} else if (dev) {
res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
netdev_reg_state(dev), &vaf);
} else {
res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}

lib/genalloc.c

@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);

lib/glob.c (new file, 287 lines)

@@ -0,0 +1,287 @@
#include <linux/module.h>
#include <linux/glob.h>
/*
* The only reason this code can be compiled as a module is because the
* ATA code that depends on it can be as well. In practice, they're
* both usually compiled in and the module overhead goes away.
*/
MODULE_DESCRIPTION("glob(7) matching");
MODULE_LICENSE("Dual MIT/GPL");
/**
* glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
* @pat: Shell-style pattern to match, e.g. "*.[ch]".
* @str: String to match. The pattern must match the entire string.
*
* Perform shell-style glob matching, returning true (1) if the match
* succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
*
* Pattern metacharacters are ?, *, [ and \.
* (And, inside character classes, !, - and ].)
*
* This is a small and simple implementation intended for device blacklists
* where a string is matched against a number of patterns. Thus, it
* does not preprocess the patterns. It is non-recursive, and run-time
* is at most quadratic: strlen(@str)*strlen(@pat).
*
* An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
* it takes 6 passes over the pattern before matching the string.
*
* Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
* treat / or leading . specially; it isn't actually used for pathnames.
*
* Note that according to glob(7) (and unlike bash), character classes
* are complemented by a leading !; this does not support the regex-style
* [^a-z] syntax.
*
* An opening bracket without a matching close is matched literally.
*/
bool __pure glob_match(char const *pat, char const *str)
{
/*
* Backtrack to previous * on mismatch and retry starting one
* character later in the string. Because * matches all characters
* (no exception for /), it can be easily proved that there's
* never a need to backtrack multiple levels.
*/
char const *back_pat = NULL, *back_str = back_str;
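	/*
	 * Note: the self-initialization of back_str only quiets a
	 * "may be used uninitialized" warning; back_str is written
	 * whenever back_pat is, and read (at backtrack:) only when
	 * back_pat != NULL.
	 */
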
/*
* Loop over each token (character or class) in pat, matching
* it against the remaining unmatched tail of str. Return false
* on mismatch, or true after matching the trailing nul bytes.
*/
for (;;) {
unsigned char c = *str++;
unsigned char d = *pat++;
switch (d) {
case '?': /* Wildcard: anything but nul */
if (c == '\0')
return false;
break;
case '*': /* Any-length wildcard */
if (*pat == '\0') /* Optimize trailing * case */
return true;
back_pat = pat;
back_str = --str; /* Allow zero-length match */
break;
case '[': { /* Character class */
bool match = false, inverted = (*pat == '!');
char const *class = pat + inverted;
unsigned char a = *class++;
/*
* Iterate over each span in the character class.
* A span is either a single character a, or a
* range a-b. The first span may begin with ']'.
*/
do {
unsigned char b = a;
if (a == '\0') /* Malformed */
goto literal;
if (class[0] == '-' && class[1] != ']') {
b = class[1];
if (b == '\0')
goto literal;
class += 2;
/* Any special action if a > b? */
}
match |= (a <= c && c <= b);
} while ((a = *class++) != ']');
if (match == inverted)
goto backtrack;
pat = class;
}
break;
case '\\':
d = *pat++;
/*FALLTHROUGH*/
default: /* Literal character */
literal:
if (c == d) {
if (d == '\0')
return true;
break;
}
backtrack:
if (c == '\0' || !back_pat)
return false; /* No point continuing */
/* Try again from last *, one character later in str. */
pat = back_pat;
str = ++back_str;
break;
}
}
}
EXPORT_SYMBOL(glob_match);
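
A few illustrative calls (examples, not from the patch); the last one exercises the documented rule that an unmatched '[' matches literally:

	glob_match("*.[ch]", "main.c");		/* true */
	glob_match("sd[a-c]", "sdd");		/* false: 'd' is outside the class */
	glob_match("a*b*c", "aXXbYYc");		/* true: backtracking wildcards */
	glob_match("literal[", "literal[");	/* true: unmatched '[' is literal */
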
#ifdef CONFIG_GLOB_SELFTEST
#include <linux/printk.h>
#include <linux/moduleparam.h>
/* Boot with "glob.verbose=1" to show successful tests, too */
static bool verbose = false;
module_param(verbose, bool, 0);
struct glob_test {
char const *pat, *str;
bool expected;
};
static bool __pure __init test(char const *pat, char const *str, bool expected)
{
bool match = glob_match(pat, str);
bool success = match == expected;
/* Can't get string literals into a particular section, so... */
static char const msg_error[] __initconst =
KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
static char const msg_ok[] __initconst =
KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
static char const mismatch[] __initconst = "mismatch";
char const *message;
if (!success)
message = msg_error;
else if (verbose)
message = msg_ok;
else
return success;
printk(message, pat, str, mismatch + 3*match);
return success;
}
/*
* The tests are all jammed together in one array to make it simpler
* to place that array in the .init.rodata section. The obvious
* "array of structures containing char *" has no way to force the
* pointed-to strings to be in a particular section.
*
* Anyway, a test consists of:
* 1. Expected glob_match result: '1' or '0'.
* 2. Pattern to match: null-terminated string
* 3. String to match against: null-terminated string
*
* The list of tests is terminated with a final '\0' instead of
* a glob_match result character.
*/
static char const glob_tests[] __initconst =
/* Some basic tests */
"1" "a\0" "a\0"
"0" "a\0" "b\0"
"0" "a\0" "aa\0"
"0" "a\0" "\0"
"1" "\0" "\0"
"0" "\0" "a\0"
/* Simple character class tests */
"1" "[a]\0" "a\0"
"0" "[a]\0" "b\0"
"0" "[!a]\0" "a\0"
"1" "[!a]\0" "b\0"
"1" "[ab]\0" "a\0"
"1" "[ab]\0" "b\0"
"0" "[ab]\0" "c\0"
"1" "[!ab]\0" "c\0"
"1" "[a-c]\0" "b\0"
"0" "[a-c]\0" "d\0"
/* Corner cases in character class parsing */
"1" "[a-c-e-g]\0" "-\0"
"0" "[a-c-e-g]\0" "d\0"
"1" "[a-c-e-g]\0" "f\0"
"1" "[]a-ceg-ik[]\0" "a\0"
"1" "[]a-ceg-ik[]\0" "]\0"
"1" "[]a-ceg-ik[]\0" "[\0"
"1" "[]a-ceg-ik[]\0" "h\0"
"0" "[]a-ceg-ik[]\0" "f\0"
"0" "[!]a-ceg-ik[]\0" "h\0"
"0" "[!]a-ceg-ik[]\0" "]\0"
"1" "[!]a-ceg-ik[]\0" "f\0"
/* Simple wild cards */
"1" "?\0" "a\0"
"0" "?\0" "aa\0"
"0" "??\0" "a\0"
"1" "?x?\0" "axb\0"
"0" "?x?\0" "abx\0"
"0" "?x?\0" "xab\0"
/* Asterisk wild cards (backtracking) */
"0" "*??\0" "a\0"
"1" "*??\0" "ab\0"
"1" "*??\0" "abc\0"
"1" "*??\0" "abcd\0"
"0" "??*\0" "a\0"
"1" "??*\0" "ab\0"
"1" "??*\0" "abc\0"
"1" "??*\0" "abcd\0"
"0" "?*?\0" "a\0"
"1" "?*?\0" "ab\0"
"1" "?*?\0" "abc\0"
"1" "?*?\0" "abcd\0"
"1" "*b\0" "b\0"
"1" "*b\0" "ab\0"
"0" "*b\0" "ba\0"
"1" "*b\0" "bb\0"
"1" "*b\0" "abb\0"
"1" "*b\0" "bab\0"
"1" "*bc\0" "abbc\0"
"1" "*bc\0" "bc\0"
"1" "*bc\0" "bbc\0"
"1" "*bc\0" "bcbc\0"
/* Multiple asterisks (complex backtracking) */
"1" "*ac*\0" "abacadaeafag\0"
"1" "*ac*ae*ag*\0" "abacadaeafag\0"
"1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
"0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
"1" "*abcd*\0" "abcabcabcabcdefg\0"
"1" "*ab*cd*\0" "abcabcabcabcdefg\0"
"1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
"0" "*abcd*\0" "abcabcabcabcefg\0"
"0" "*ab*cd*\0" "abcabcabcabcefg\0";
static int __init glob_init(void)
{
unsigned successes = 0;
unsigned n = 0;
char const *p = glob_tests;
static char const message[] __initconst =
KERN_INFO "glob: %u self-tests passed, %u failed\n";
/*
* Tests are jammed together in a string. The first byte is '1'
* or '0' to indicate the expected outcome, or '\0' to indicate the
* end of the tests. Then come two null-terminated strings: the
* pattern and the string to match it against.
*/
while (*p) {
bool expected = *p++ & 1;
char const *pat = p;
p += strlen(p) + 1;
successes += test(pat, p, expected);
p += strlen(p) + 1;
n++;
}
n -= successes;
printk(message, successes, n);
/* What's the errno for "kernel bug detected"? Guess... */
return n ? -ECANCELED : 0;
}
/* We need a dummy exit function to allow unload */
static void __exit glob_fini(void) { }
module_init(glob_init);
module_exit(glob_fini);
#endif /* CONFIG_GLOB_SELFTEST */

lib/hweight.c

@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef ARCH_HAS_FAST_MULTIPLIER
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef ARCH_HAS_FAST_MULTIPLIER
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;

lib/idr.c

@@ -590,26 +590,27 @@ static void __idr_remove_all(struct idr *idp)
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
*paa = idp->top;
RCU_INIT_POINTER(idp->top, NULL);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
p = *paa;
while (n > IDR_BITS && p) {
n -= IDR_BITS;
*paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
*++paa = p;
}
bt_mask = id;
id += 1 << n;
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
if (p)
free_layer(idp, p);
if (*paa)
free_layer(idp, *paa);
n += IDR_BITS;
p = *--paa;
--paa;
}
}
idp->layers = 0;
@@ -692,15 +693,16 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = rcu_dereference_raw(idp->top);
*paa = rcu_dereference_raw(idp->top);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
*++paa = p;
}
if (p) {
@@ -712,7 +714,7 @@ int idr_for_each(struct idr *idp,
id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
--paa;
}
}
@@ -740,17 +742,18 @@ void *idr_get_next(struct idr *idp, int *nextidp)
int n, max;
/* find first ent */
p = rcu_dereference_raw(idp->top);
p = *paa = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer + 1) * IDR_BITS;
max = idr_max(p->layer + 1);
while (id >= 0 && id <= max) {
p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
*++paa = p;
}
if (p) {
@@ -768,7 +771,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
--paa;
}
}
return NULL;

lib/iovec.c

@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len)
{
/* No data? Done! */
if (len == 0)
return 0;
/* Skip over the finished iovecs */
while (offset >= iov->iov_len) {
offset -= iov->iov_len;

lib/kfifo.c

@@ -561,8 +561,7 @@ EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
if (!nents)
BUG();
BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
@@ -585,8 +584,7 @@ EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
if (!nents)
BUG();
BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);

lib/klist.c

@@ -140,11 +140,11 @@ void klist_add_tail(struct klist_node *n, struct klist *k)
EXPORT_SYMBOL_GPL(klist_add_tail);
/**
* klist_add_after - Init a klist_node and add it after an existing node
* klist_add_behind - Init a klist_node and add it after an existing node
* @n: node we're adding.
* @pos: node to put @n after
*/
void klist_add_after(struct klist_node *n, struct klist_node *pos)
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
@@ -153,7 +153,7 @@ void klist_add_after(struct klist_node *n, struct klist_node *pos)
list_add(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_after);
EXPORT_SYMBOL_GPL(klist_add_behind);
/**
* klist_add_before - Init a klist_node and add it before an existing node

lib/list_sort.c

@@ -1,3 +1,6 @@
#define pr_fmt(fmt) "list_sort_test: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
@@ -47,6 +50,7 @@ static void merge_and_restore_back_links(void *priv,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
u8 count = 0;
while (a && b) {
/* if equal, take 'a' -- important for sort stability */
@@ -70,7 +74,8 @@ static void merge_and_restore_back_links(void *priv,
* element comparison is needed, so the client's cmp()
* routine can invoke cond_resched() periodically.
*/
(*cmp)(priv, tail->next, tail->next);
if (unlikely(!(++count)))
(*cmp)(priv, tail->next, tail->next);
tail->next->prev = tail;
tail = tail->next;
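
Because count is a u8, the forced self-comparison now fires once every 256 merge steps rather than on every element; that is still enough for a client whose cmp() wants to yield the CPU. A sketch of such a client (struct my_item and its key field are hypothetical):

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	cond_resched();	/* list_sort() now guarantees a call roughly once
			 * per 256 elements, even while back-links are
			 * being rebuilt with no real comparisons needed */
	return container_of(a, struct my_item, list)->key -
	       container_of(b, struct my_item, list)->key;
}
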
@@ -123,9 +128,7 @@ void list_sort(void *priv, struct list_head *head,
}
if (lev > max_lev) {
if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
printk_once(KERN_DEBUG "list passed to"
" list_sort() too long for"
" efficiency\n");
printk_once(KERN_DEBUG "list too long for efficiency\n");
lev--;
}
max_lev = lev;
@@ -168,27 +171,25 @@ static struct debug_el **elts __initdata;
static int __init check(struct debug_el *ela, struct debug_el *elb)
{
if (ela->serial >= TEST_LIST_LEN) {
printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
ela->serial);
pr_err("error: incorrect serial %d\n", ela->serial);
return -EINVAL;
}
if (elb->serial >= TEST_LIST_LEN) {
printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
elb->serial);
pr_err("error: incorrect serial %d\n", elb->serial);
return -EINVAL;
}
if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
printk(KERN_ERR "list_sort_test: error: phantom element\n");
pr_err("error: phantom element\n");
return -EINVAL;
}
if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
ela->poison1, ela->poison2);
pr_err("error: bad poison: %#x/%#x\n",
ela->poison1, ela->poison2);
return -EINVAL;
}
if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
elb->poison1, elb->poison2);
pr_err("error: bad poison: %#x/%#x\n",
elb->poison1, elb->poison2);
return -EINVAL;
}
return 0;
@@ -207,25 +208,23 @@ static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
static int __init list_sort_test(void)
{
int i, count = 1, err = -EINVAL;
int i, count = 1, err = -ENOMEM;
struct debug_el *el;
struct list_head *cur, *tmp;
struct list_head *cur;
LIST_HEAD(head);
printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
pr_debug("start testing list_sort()\n");
elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
if (!elts) {
printk(KERN_ERR "list_sort_test: error: cannot allocate "
"memory\n");
goto exit;
pr_err("error: cannot allocate memory\n");
return err;
}
for (i = 0; i < TEST_LIST_LEN; i++) {
el = kmalloc(sizeof(*el), GFP_KERNEL);
if (!el) {
printk(KERN_ERR "list_sort_test: error: cannot "
"allocate memory\n");
pr_err("error: cannot allocate memory\n");
goto exit;
}
/* force some equivalencies */
@@ -239,52 +238,52 @@ static int __init list_sort_test(void)
list_sort(NULL, &head, cmp);
err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
if (cur->next->prev != cur) {
printk(KERN_ERR "list_sort_test: error: list is "
"corrupted\n");
pr_err("error: list is corrupted\n");
goto exit;
}
cmp_result = cmp(NULL, cur, cur->next);
if (cmp_result > 0) {
printk(KERN_ERR "list_sort_test: error: list is not "
"sorted\n");
pr_err("error: list is not sorted\n");
goto exit;
}
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
if (cmp_result == 0 && el->serial >= el1->serial) {
printk(KERN_ERR "list_sort_test: error: order of "
"equivalent elements not preserved\n");
pr_err("error: order of equivalent elements not "
"preserved\n");
goto exit;
}
if (check(el, el1)) {
printk(KERN_ERR "list_sort_test: error: element check "
"failed\n");
pr_err("error: element check failed\n");
goto exit;
}
count++;
}
if (head.prev != cur) {
pr_err("error: list is corrupted\n");
goto exit;
}
if (count != TEST_LIST_LEN) {
printk(KERN_ERR "list_sort_test: error: bad list length %d",
count);
pr_err("error: bad list length %d", count);
goto exit;
}
err = 0;
exit:
for (i = 0; i < TEST_LIST_LEN; i++)
kfree(elts[i]);
kfree(elts);
list_for_each_safe(cur, tmp, &head) {
list_del(cur);
kfree(container_of(cur, struct debug_el, list));
}
return err;
}
module_init(list_sort_test);

lib/lockref.c

@@ -1,6 +1,5 @@
#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>
#if USE_CMPXCHG_LOCKREF
@@ -29,7 +28,7 @@
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
arch_mutex_cpu_relax(); \
cpu_relax_lowlatency(); \
} \
} while (0)

lib/lru_cache.c

@@ -169,7 +169,7 @@ out_fail:
return NULL;
}
void lc_free_by_index(struct lru_cache *lc, unsigned i)
static void lc_free_by_index(struct lru_cache *lc, unsigned i)
{
void *p = lc->lc_element[i];
WARN_ON(!p);
@@ -643,9 +643,10 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
* lc_dump - Dump a complete LRU cache to seq in textual form.
* @lc: the lru cache to operate on
* @seq: the &struct seq_file pointer to seq_printf into
* @utext: user supplied "heading" or other info
* @utext: user supplied additional "heading" or other info
* @detail: function pointer the user may provide to dump further details
* of the object the lc_element is embedded in.
* of the object the lc_element is embedded in. May be NULL.
* Note: a leading space ' ' and trailing newline '\n' is implied.
*/
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
void (*detail) (struct seq_file *, struct lc_element *))
@@ -654,16 +655,18 @@ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext
struct lc_element *e;
int i;
seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
for (i = 0; i < nr_elements; i++) {
e = lc_element_by_index(lc, i);
if (e->lc_number == LC_FREE) {
seq_printf(seq, "\t%2d: FREE\n", i);
} else {
seq_printf(seq, "\t%2d: %4u %4u ", i,
e->lc_number, e->refcnt);
if (e->lc_number != e->lc_new_number)
seq_printf(seq, "\t%5d: %6d %8d %6d ",
i, e->lc_number, e->lc_new_number, e->refcnt);
else
seq_printf(seq, "\t%5d: %6d %-8s %6d ",
i, e->lc_number, "-\"-", e->refcnt);
if (detail)
detail(seq, e);
}
seq_putc(seq, '\n');
}
}

lib/net_utils.c

@@ -3,24 +3,24 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
int mac_pton(const char *s, u8 *mac)
bool mac_pton(const char *s, u8 *mac)
{
int i;
/* XX:XX:XX:XX:XX:XX */
if (strlen(s) < 3 * ETH_ALEN - 1)
return 0;
return false;
/* Don't dirty result unless string is valid MAC. */
for (i = 0; i < ETH_ALEN; i++) {
if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
return 0;
return false;
if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
return 0;
return false;
}
for (i = 0; i < ETH_ALEN; i++) {
mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
}
return 1;
return true;
}
EXPORT_SYMBOL(mac_pton);
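
An illustrative caller (not part of the patch), showing the new bool contract:

	u8 mac[ETH_ALEN];

	if (mac_pton("00:1a:2b:3c:4d:5e", mac)) {
		/* mac[] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e } */
	} else {
		/* malformed string: mac[] deliberately left untouched */
	}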

lib/percpu-refcount.c

@@ -31,6 +31,11 @@
#define PCPU_COUNT_BIAS (1U << 31)
static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
ref->pcpu_count = alloc_percpu(unsigned);
if (!ref->pcpu_count)
ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
if (!ref->pcpu_count_ptr)
return -ENOMEM;
ref->release = release;
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
* percpu_ref_cancel_init - cancel percpu_ref_init()
* @ref: percpu_ref to cancel init for
* percpu_ref_reinit - re-initialize a percpu refcount
* @ref: percpu_ref to re-initialize
*
* Once a percpu_ref is initialized, its destruction is initiated by
* percpu_ref_kill() and completes asynchronously, which can be painful to
* do when destroying a half-constructed object in init failure path.
* Re-initialize @ref so that it's in the same state as when it finished
* percpu_ref_init(). @ref must have been initialized successfully, killed
* and reached 0 but not exited.
*
* This function destroys @ref without invoking @ref->release and the
* memory area containing it can be freed immediately on return. To
* prevent accidental misuse, it's required that @ref has finished
* percpu_ref_init(), whether successful or not, but never used.
*
* The weird name and usage restriction are to prevent people from using
* this function by mistake for normal shutdown instead of
* percpu_ref_kill().
* Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
* this function is in progress.
*/
void percpu_ref_cancel_init(struct percpu_ref *ref)
void percpu_ref_reinit(struct percpu_ref *ref)
{
unsigned __percpu *pcpu_count = ref->pcpu_count;
unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
int cpu;
WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
BUG_ON(!pcpu_count);
WARN_ON(!percpu_ref_is_zero(ref));
atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
/*
* Restore per-cpu operation. smp_store_release() is paired with
* smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
* that the zeroing is visible to all percpu accesses which can see
* the following PCPU_REF_DEAD clearing.
*/
for_each_possible_cpu(cpu)
*per_cpu_ptr(pcpu_count, cpu) = 0;
smp_store_release(&ref->pcpu_count_ptr,
ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
/**
* percpu_ref_exit - undo percpu_ref_init()
* @ref: percpu_ref to exit
*
* This function exits @ref. The caller is responsible for ensuring that
* @ref is no longer in active use. The usual places to invoke this
* function from are the @ref->release() callback or in init failure path
* where percpu_ref_init() succeeded but other parts of the initialization
* of the embedding object failed.
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
if (pcpu_count) {
for_each_possible_cpu(cpu)
WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
free_percpu(ref->pcpu_count);
free_percpu(pcpu_count);
ref->pcpu_count_ptr = PCPU_REF_DEAD;
}
}
EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
unsigned __percpu *pcpu_count = ref->pcpu_count;
unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
unsigned count = 0;
int cpu;
/* Mask out PCPU_REF_DEAD */
pcpu_count = (unsigned __percpu *)
(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(pcpu_count, cpu);
free_percpu(pcpu_count);
pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
/*
@@ -152,13 +175,28 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
"percpu_ref_kill() called more than once!\n");
ref->pcpu_count = (unsigned __percpu *)
(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
ref->pcpu_count_ptr |= PCPU_REF_DEAD;
ref->confirm_kill = confirm_kill;
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
/*
* XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
* block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18
* devel cycle. Do not use anywhere else.
*/
void __percpu_ref_kill_expedited(struct percpu_ref *ref)
{
WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
"percpu_ref_kill() called more than once on %pf!",
ref->release);
ref->pcpu_count_ptr |= PCPU_REF_DEAD;
synchronize_sched_expedited();
percpu_ref_kill_rcu(&ref->rcu);
}
@@ -40,6 +40,10 @@
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
#else
static inline void prandom_state_selftest(void)
{
}
#endif
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
*/
u32 prandom_u32_state(struct rnd_state *state)
{
#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
static u32 __extract_hwseed(void)
{
/* Note: This sort of seeding is ONLY used in test cases and
* during boot, from core_initcall until late_initcall, as we
* don't have a stronger entropy source available yet.
* After late_initcall we must (!) reseed the entire state,
* otherwise an attacker just needs to search a 32-bit space
* to probe our internal 128-bit state if he knows a couple
* of prandom32 outputs!
*/
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
state->s1 = __seed(LCG(seed), 2U);
state->s2 = __seed(LCG(state->s1), 8U);
state->s3 = __seed(LCG(state->s2), 16U);
state->s4 = __seed(LCG(state->s3), 128U);
u32 val = 0;
(void)(arch_get_random_seed_int(&val) ||
arch_get_random_int(&val));
return val;
}
static void prandom_seed_early(struct rnd_state *state, u32 seed,
bool mix_with_hwseed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
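As a concrete illustration of the seeding chain above, here is how one 32-bit seed fans out into the four state words when no hardware seed is mixed in (mix_with_hwseed == false); __seed() from linux/random.h only enforces the per-word minimum:

u32 seed = 0x12345678;			/* example value */
u32 s1 = __seed(seed * 69069U, 2U);	/* becomes state->s1 */
u32 s2 = __seed(s1 * 69069U, 8U);	/* becomes state->s2 */
u32 s3 = __seed(s2 * 69069U, 16U);	/* becomes state->s3 */
u32 s4 = __seed(s3 * 69069U, 128U);	/* becomes state->s4 */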
/**
@@ -194,14 +201,13 @@ static int __init prandom_init(void)
{
int i;
#ifdef CONFIG_RANDOM32_SELFTEST
prandom_state_selftest();
#endif
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
u32 weak_seed = (i + jiffies) ^ random_get_entropy();
prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
prandom_seed_early(state, weak_seed, true);
prandom_warmup(state);
}
@@ -210,6 +216,7 @@ static int __init prandom_init(void)
core_initcall(prandom_init);
static void __prandom_timer(unsigned long dontcare);
static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
static void __prandom_timer(unsigned long dontcare)
@@ -419,7 +426,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
prandom_seed_very_weak(&state, test1[i].seed);
prandom_seed_early(&state, test1[i].seed, false);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +441,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
prandom_seed_very_weak(&state, test2[i].seed);
prandom_seed_early(&state, test2[i].seed, false);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
@@ -101,7 +101,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \ / \
* p u --> P U
* / /
* n N
* n n
*
* However, since g's parent might be red, and
* 4) does not allow this, we need to recurse
lib/rhashtable.c Normal file
@@ -0,0 +1,790 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
* Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
* Based on the following paper:
* https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
*
* Code partially derived from nft_hash
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
return ht->p.mutex_is_held();
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif
static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
return (void *) he - ht->p.head_offset;
}
static u32 __hashfn(const struct rhashtable *ht, const void *key,
u32 len, u32 hsize)
{
u32 h;
h = ht->p.hashfn(key, len, ht->p.hash_rnd);
return h & (hsize - 1);
}
/**
* rhashtable_hashfn - compute hash for key of given length
* @ht: hash table to compute the hash for
* @key: pointer to key
* @len: length of key
*
* Computes the hash value using the hash function provided in the 'hashfn'
* member of struct rhashtable_params. The returned value is guaranteed to be
* smaller than the number of buckets in the hash table.
*/
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);
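A caller-side usage sketch (hypothetical function): the result can be used directly as a bucket index because it is already masked to the current table size:

static u32 example_bucket(const struct rhashtable *ht)
{
	u32 key = 42;
	u32 bucket;

	rcu_read_lock();	/* ht->tbl is dereferenced under RCU */
	bucket = rhashtable_hashfn(ht, &key, sizeof(key));
	rcu_read_unlock();

	return bucket;		/* already masked to the table size */
}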
static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
if (unlikely(!ht->p.key_len)) {
u32 h;
h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
return h & (hsize - 1);
}
return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}
/**
* rhashtable_obj_hashfn - compute hash for hashed object
* @ht: hash table to compute the hash for
* @ptr: pointer to hashed object
*
* Computes the hash value using either 'hashfn' or 'obj_hashfn' from
* struct rhashtable_params, depending on whether the hash table is set
* up to work with
* a fixed length key. The returned value is guaranteed to be smaller than
* the number of buckets in the hash table.
*/
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
static u32 head_hashfn(const struct rhashtable *ht,
const struct rhash_head *he, u32 hsize)
{
return obj_hashfn(ht, rht_obj(ht, he), hsize);
}
static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
{
struct bucket_table *tbl;
size_t size;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
tbl = kzalloc(size, flags);
if (tbl == NULL)
tbl = vzalloc(size);
if (tbl == NULL)
return NULL;
tbl->size = nbuckets;
return tbl;
}
static void bucket_table_free(const struct bucket_table *tbl)
{
kvfree(tbl);
}
/**
* rht_grow_above_75 - returns true if nelems > 0.75 * table-size
* @ht: hash table
* @new_size: new table size
*/
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
/* Expand table when exceeding 75% load */
return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);
/**
* rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
* @ht: hash table
* @new_size: new table size
*/
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
/* Shrink table beneath 30% load */
return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
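A worked example of the two thresholds, using the table size the callers below actually pass in (the current tbl->size):

/* For a table of 64 buckets:
 *   rht_grow_above_75(ht, 64)   -> true once nelems > 64 / 4 * 3 = 48
 *   rht_shrink_below_30(ht, 64) -> true once nelems < 64 * 3 / 10 = 19
 */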
static void hashtable_chain_unzip(const struct rhashtable *ht,
const struct bucket_table *new_tbl,
struct bucket_table *old_tbl, size_t n)
{
struct rhash_head *he, *p, *next;
unsigned int h;
/* Old bucket empty, no work needed. */
p = rht_dereference(old_tbl->buckets[n], ht);
if (!p)
return;
/* Advance the old bucket pointer one or more times until it
* reaches a node that doesn't hash to the same new-table bucket
* as the previous node. Call that previous node p.
*/
h = head_hashfn(ht, p, new_tbl->size);
rht_for_each(he, p->next, ht) {
if (head_hashfn(ht, he, new_tbl->size) != h)
break;
p = he;
}
RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
/* Find the subsequent node which does hash to the same
* bucket as node P, or NULL if no such node exists.
*/
next = NULL;
if (he) {
rht_for_each(he, he->next, ht) {
if (head_hashfn(ht, he, new_tbl->size) == h) {
next = he;
break;
}
}
}
/* Set p's next pointer to that subsequent node pointer,
* bypassing the nodes which do not hash to p's bucket
*/
RCU_INIT_POINTER(p->next, next);
}
/**
* rhashtable_expand - Expand hash table while allowing concurrent lookups
* @ht: the hash table to expand
* @flags: allocation flags
*
* A secondary bucket array is allocated and the hash entries are migrated
* while keeping them on both lists until the end of the RCU grace period.
*
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
*
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
*/
int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_head *he;
unsigned int i, h;
bool complete;
ASSERT_RHT_MUTEX(ht);
if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
return 0;
new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
if (new_tbl == NULL)
return -ENOMEM;
ht->shift++;
/* For each new bucket, search the corresponding old bucket
* for the first entry that hashes to the new bucket, and
* link the new bucket to that entry. Since all the entries
* which will end up in the new bucket appear in the same
* old bucket, this constructs an entirely valid new hash
* table, but with multiple buckets "zipped" together into a
* single imprecise chain.
*/
for (i = 0; i < new_tbl->size; i++) {
h = i & (old_tbl->size - 1);
rht_for_each(he, old_tbl->buckets[h], ht) {
if (head_hashfn(ht, he, new_tbl->size) == i) {
RCU_INIT_POINTER(new_tbl->buckets[i], he);
break;
}
}
}
/* Publish the new table pointer. Lookups may now traverse
* the new table, but they will not benefit from any
* additional efficiency until later steps unzip the buckets.
*/
rcu_assign_pointer(ht->tbl, new_tbl);
/* Unzip interleaved hash chains */
do {
/* Wait for readers. All new readers will see the new
* table, and thus no references to the old table will
* remain.
*/
synchronize_rcu();
/* For each bucket in the old table (each of which
* contains items from multiple buckets of the new
* table): ...
*/
complete = true;
for (i = 0; i < old_tbl->size; i++) {
hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
if (old_tbl->buckets[i] != NULL)
complete = false;
}
} while (!complete);
bucket_table_free(old_tbl);
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
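To make the zip/unzip dance concrete, a small worked example (hypothetical entries; new-table hash values in parentheses) of growing from 4 to 8 buckets:

/* Old bucket 1 holds A(hash & 7 == 5), B(== 1), C(== 5), linked A->B->C.
 * After the zip step:
 *   new[1] = B              (first entry rehashing to 1)
 *   new[5] = A -> B -> C    (imprecise: B does not belong, but a lookup
 *                            in new[5] merely fails the key compare on B)
 * hashtable_chain_unzip() then relinks A->C and shortens old[1] across
 * successive RCU grace periods until every chain is precise.
 */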
/**
* rhashtable_shrink - Shrink hash table while allowing concurrent lookups
* @ht: the hash table to shrink
* @flags: allocation flags
*
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
*
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
*/
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
{
struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
struct rhash_head __rcu **pprev;
unsigned int i;
ASSERT_RHT_MUTEX(ht);
if (tbl->size <= HASH_MIN_SIZE)
return 0;
ntbl = bucket_table_alloc(tbl->size / 2, flags);
if (ntbl == NULL)
return -ENOMEM;
ht->shift--;
/* Link each bucket in the new table to the first bucket
* in the old table that contains entries which will hash
* to the new bucket.
*/
for (i = 0; i < ntbl->size; i++) {
ntbl->buckets[i] = tbl->buckets[i];
/* Walk to the end of that chain, then append the chain of the
* corresponding upper old bucket (i + ntbl->size), whose entries
* also hash to this new bucket.
*/
for (pprev = &ntbl->buckets[i]; *pprev != NULL;
pprev = &rht_dereference(*pprev, ht)->next)
;
RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
}
/* Publish the new, valid hash table */
rcu_assign_pointer(ht->tbl, ntbl);
/* Wait for readers. No new readers will have references to the
* old hash table.
*/
synchronize_rcu();
bucket_table_free(tbl);
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
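The shrink direction is simpler; for example, when halving 8 buckets to 4:

/* new[1] starts as old[1]'s chain; the loop then walks to its end and
 * appends old[5], since (hash & 7) == 1 or 5 both imply (hash & 3) == 1.
 */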
/**
* rhashtable_insert - insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
* @flags: allocation flags (table expansion)
*
* Will automatically grow the table via rhashtable_expand() if the
* grow_decision function specified at rhashtable_init() returns true.
*
* The caller must ensure that no concurrent table mutations occur. It is
* however valid to have concurrent lookups if they are RCU protected.
*/
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
gfp_t flags)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
u32 hash;
ASSERT_RHT_MUTEX(ht);
hash = head_hashfn(ht, obj, tbl->size);
RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
rcu_assign_pointer(tbl->buckets[hash], obj);
ht->nelems++;
if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
rhashtable_expand(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
/**
* rhashtable_remove_pprev - remove object from hash table given previous element
* @ht: hash table
* @obj: pointer to hash head inside object
* @pprev: pointer to previous element
* @flags: allocation flags (table expansion)
*
* Identical to rhashtable_remove() but the caller is already aware of the
* element in front of the element to be deleted. This is particularly
* useful for deletion when combined with walking or lookup.
*/
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
struct rhash_head __rcu **pprev, gfp_t flags)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
ASSERT_RHT_MUTEX(ht);
RCU_INIT_POINTER(*pprev, obj->next);
ht->nelems--;
if (ht->p.shrink_decision &&
ht->p.shrink_decision(ht, tbl->size))
rhashtable_shrink(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
/**
* rhashtable_remove - remove object from hash table
* @ht: hash table
* @obj: pointer to hash head inside object
* @flags: allocation flags (table expansion)
*
* Since the hash chain is singly linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
* considerably slower if the hash table is not correctly sized.
*
* Will automatically shrink the table via rhashtable_shrink() if the
* shrink_decision function specified at rhashtable_init() returns true.
*
* The caller must ensure that no concurrent table mutations occur. It is
* however valid to have concurrent lookups if they are RCU protected.
*/
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
gfp_t flags)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
struct rhash_head __rcu **pprev;
struct rhash_head *he;
u32 h;
ASSERT_RHT_MUTEX(ht);
h = head_hashfn(ht, obj, tbl->size);
pprev = &tbl->buckets[h];
rht_for_each(he, tbl->buckets[h], ht) {
if (he != obj) {
pprev = &he->next;
continue;
}
rhashtable_remove_pprev(ht, he, pprev, flags);
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
/**
* rhashtable_lookup - lookup key in hash table
* @ht: hash table
* @key: pointer to key
*
* Computes the hash value for the key and traverses the bucket chain looking
* for an entry with an identical key. The first matching entry is returned.
*
* This lookup function may only be used for fixed key hash tables (key_len
* parameter set). It will BUG() if used inappropriately.
*
* Lookups may occur in parallel with hash mutations as long as the lookup is
* guarded by rcu_read_lock(). The caller must take care of this.
*/
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
struct rhash_head *he;
u32 h;
BUG_ON(!ht->p.key_len);
h = __hashfn(ht, key, ht->p.key_len, tbl->size);
rht_for_each_rcu(he, tbl->buckets[h], ht) {
if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
ht->p.key_len))
continue;
return (void *) he - ht->p.head_offset;
}
return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
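A caller-side sketch tying insert and lookup together (hypothetical function; it reuses struct test_obj from the self-test at the bottom of this file and assumes a table set up with key_len = sizeof(int), as in Example 1 of rhashtable_init() below; error handling is minimal):

static void example_insert_lookup(struct rhashtable *ht)
{
	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	u32 key = 42;

	if (!obj)
		return;

	obj->value = key;
	rhashtable_insert(ht, &obj->node, GFP_KERNEL);	/* caller serializes mutations */

	rcu_read_lock();
	obj = rhashtable_lookup(ht, &key);		/* lookups are RCU protected */
	rcu_read_unlock();
}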
/**
* rhashtable_lookup_compare - search hash table with compare function
* @ht: hash table
* @hash: hash value of desired entry
* @compare: compare function, must return true on match
* @arg: argument passed on to compare function
*
* Traverses the bucket chain behind the provided hash value and calls the
* specified compare function for each entry.
*
* Lookups may occur in parallel with hash mutations as long as the lookup is
* guarded by rcu_read_lock(). The caller must take care of this.
*
* Returns the first entry on which the compare function returned true.
*/
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
bool (*compare)(void *, void *), void *arg)
{
const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
struct rhash_head *he;
if (unlikely(hash >= tbl->size))
return NULL;
rht_for_each_rcu(he, tbl->buckets[hash], ht) {
if (!compare(rht_obj(ht, he), arg))
continue;
return (void *) he - ht->p.head_offset;
}
return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
static size_t rounded_hashtable_size(unsigned int nelem)
{
return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
}
/**
* rhashtable_init - initialize a new hash table
* @ht: hash table to be initialized
* @params: configuration parameters
*
* Initializes a new hash table based on the provided configuration
* parameters. A table can be configured either with a variable or
* fixed length key:
*
* Configuration Example 1: Fixed length keys
* struct test_obj {
* int key;
* void * my_member;
* struct rhash_head node;
* };
*
* struct rhashtable_params params = {
* .head_offset = offsetof(struct test_obj, node),
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = arch_fast_hash,
* .mutex_is_held = &my_mutex_is_held,
* };
*
* Configuration Example 2: Variable length keys
* struct test_obj {
* [...]
* struct rhash_head node;
* };
*
* u32 my_hash_fn(const void *data, u32 seed)
* {
* struct test_obj *obj = data;
*
* return [... hash ...];
* }
*
* struct rhashtable_params params = {
* .head_offset = offsetof(struct test_obj, node),
* .hashfn = arch_fast_hash,
* .obj_hashfn = my_hash_fn,
* .mutex_is_held = &my_mutex_is_held,
* };
*/
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
struct bucket_table *tbl;
size_t size;
size = HASH_DEFAULT_SIZE;
if ((params->key_len && !params->hashfn) ||
(!params->key_len && !params->obj_hashfn))
return -EINVAL;
if (params->nelem_hint)
size = rounded_hashtable_size(params->nelem_hint);
tbl = bucket_table_alloc(size, GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
memset(ht, 0, sizeof(*ht));
ht->shift = ilog2(tbl->size);
memcpy(&ht->p, params, sizeof(*params));
RCU_INIT_POINTER(ht->tbl, tbl);
if (!ht->p.hash_rnd)
get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
* rhashtable_destroy - destroy hash table
* @ht: the hash table to destroy
*
* Frees the bucket array. This function is not RCU safe; the caller has
* to make sure that no resizing may happen by unpublishing the hash table
* and waiting for a quiescent cycle before releasing the bucket array.
*/
void rhashtable_destroy(const struct rhashtable *ht)
{
bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
/**************************************************************************
* Self Test
**************************************************************************/
#ifdef CONFIG_TEST_RHASHTABLE
#define TEST_HT_SIZE 8
#define TEST_ENTRIES 2048
#define TEST_PTR ((void *) 0xdeadbeef)
#define TEST_NEXPANDS 4
static int test_mutex_is_held(void)
{
return 1;
}
struct test_obj {
void *ptr;
int value;
struct rhash_head node;
};
static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
for (i = 0; i < TEST_ENTRIES * 2; i++) {
struct test_obj *obj;
bool expected = !(i % 2);
u32 key = i;
obj = rhashtable_lookup(ht, &key);
if (expected && !obj) {
pr_warn("Test failed: Could not find key %u\n", key);
return -ENOENT;
} else if (!expected && obj) {
pr_warn("Test failed: Unexpected entry found for key %u\n",
key);
return -EEXIST;
} else if (expected && obj) {
if (obj->ptr != TEST_PTR || obj->value != i) {
pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
obj->ptr, TEST_PTR, obj->value, i);
return -EINVAL;
}
}
}
return 0;
}
static void test_bucket_stats(struct rhashtable *ht,
struct bucket_table *tbl,
bool quiet)
{
unsigned int cnt, i, total = 0;
struct test_obj *obj;
for (i = 0; i < tbl->size; i++) {
cnt = 0;
if (!quiet)
pr_info(" [%#4x/%zu]", i, tbl->size);
rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
cnt++;
total++;
if (!quiet)
pr_cont(" [%p],", obj);
}
if (!quiet)
pr_cont("\n [%#x] first element: %p, chain length: %u\n",
i, tbl->buckets[i], cnt);
}
pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
total, ht->nelems, TEST_ENTRIES);
}
static int __init test_rhashtable(struct rhashtable *ht)
{
struct bucket_table *tbl;
struct test_obj *obj, *next;
int err;
unsigned int i;
/*
* Insertion Test:
* Insert TEST_ENTRIES into table with all keys even numbers
*/
pr_info(" Adding %d keys\n", TEST_ENTRIES);
for (i = 0; i < TEST_ENTRIES; i++) {
struct test_obj *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj) {
err = -ENOMEM;
goto error;
}
obj->ptr = TEST_PTR;
obj->value = i * 2;
rhashtable_insert(ht, &obj->node, GFP_KERNEL);
}
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
test_bucket_stats(ht, tbl, true);
test_rht_lookup(ht);
rcu_read_unlock();
for (i = 0; i < TEST_NEXPANDS; i++) {
pr_info(" Table expansion iteration %u...\n", i);
rhashtable_expand(ht, GFP_KERNEL);
rcu_read_lock();
pr_info(" Verifying lookups...\n");
test_rht_lookup(ht);
rcu_read_unlock();
}
for (i = 0; i < TEST_NEXPANDS; i++) {
pr_info(" Table shrinkage iteration %u...\n", i);
rhashtable_shrink(ht, GFP_KERNEL);
rcu_read_lock();
pr_info(" Verifying lookups...\n");
test_rht_lookup(ht);
rcu_read_unlock();
}
pr_info(" Deleting %d keys\n", TEST_ENTRIES);
for (i = 0; i < TEST_ENTRIES; i++) {
u32 key = i * 2;
obj = rhashtable_lookup(ht, &key);
BUG_ON(!obj);
rhashtable_remove(ht, &obj->node, GFP_KERNEL);
kfree(obj);
}
return 0;
error:
tbl = rht_dereference_rcu(ht->tbl, ht);
for (i = 0; i < tbl->size; i++)
rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
kfree(obj);
return err;
}
static int __init test_rht_init(void)
{
struct rhashtable ht;
struct rhashtable_params params = {
.nelem_hint = TEST_HT_SIZE,
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(int),
.hashfn = arch_fast_hash,
.mutex_is_held = &test_mutex_is_held,
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
};
int err;
pr_info("Running resizable hashtable tests...\n");
err = rhashtable_init(&ht, &params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
err);
return err;
}
err = test_rhashtable(&ht);
rhashtable_destroy(&ht);
return err;
}
subsys_initcall(test_rht_init);
#endif /* CONFIG_TEST_RHASHTABLE */
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(sg_nents);
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
@@ -165,6 +165,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
* @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
* @free_fn: Free function
*
* Description:
@@ -174,7 +175,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
sg_free_fn *free_fn)
bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
@@ -202,7 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
}
table->orig_nents -= sg_size;
free_fn(sgl, alloc_size);
if (!skip_first_chunk) {
free_fn(sgl, alloc_size);
skip_first_chunk = false;
}
sgl = next;
}
@@ -217,7 +221,7 @@ EXPORT_SYMBOL(__sg_free_table);
**/
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
@@ -241,8 +245,8 @@ EXPORT_SYMBOL(sg_free_table);
*
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int max_ents, gfp_t gfp_mask,
sg_alloc_fn *alloc_fn)
unsigned int max_ents, struct scatterlist *first_chunk,
gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
@@ -251,7 +255,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (nents == 0)
return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
if (WARN_ON_ONCE(nents > max_ents))
return -EINVAL;
#endif
@@ -269,7 +273,12 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
left -= sg_size;
sg = alloc_fn(alloc_size, gfp_mask);
if (first_chunk) {
sg = first_chunk;
first_chunk = NULL;
} else {
sg = alloc_fn(alloc_size, gfp_mask);
}
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
@@ -324,9 +333,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
gfp_mask, sg_kmalloc);
NULL, gfp_mask, sg_kmalloc);
if (unlikely(ret))
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
return ret;
}
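A sketch of what the new first_chunk/skip_first_chunk pair enables for a caller with an embedded scatterlist array (the struct, function, and sizes are hypothetical; sg_kmalloc/sg_kfree are this file's internal helpers, used here only for symmetry with sg_alloc_table() above):

struct my_cmd {
	struct sg_table table;
	struct scatterlist inline_sg[8];	/* preallocated first chunk */
};

static int my_cmd_map(struct my_cmd *cmd, unsigned int nents, gfp_t gfp)
{
	int ret;

	ret = __sg_alloc_table(&cmd->table, nents, 8, cmd->inline_sg,
			       gfp, sg_kmalloc);
	if (ret)
		/* true: do not kfree() the embedded inline_sg chunk */
		__sg_free_table(&cmd->table, 8, true, sg_kfree);
	return ret;
}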
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
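The multiplications above are just byte broadcasts; the generic shift-based fallback builds the same repeated-byte mask, along the lines of:

/* For value == 0xAB, each branch ends up with 0xABABABABABABABAB: */
u64 value64 = 0xAB;

value64 |= value64 << 8;	/* 0x000000000000ABAB */
value64 |= value64 << 16;	/* 0x00000000ABABABAB */
value64 |= value64 << 32;	/* 0xABABABABABABABAB */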
@@ -25,12 +25,15 @@
int string_get_size(u64 size, const enum string_size_units units,
char *buf, int len)
{
static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
"EB", "ZB", "YB", NULL};
static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
"EiB", "ZiB", "YiB", NULL };
static const char **units_str[] = {
[STRING_UNITS_10] = units_10,
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL
};
static const char *const units_2[] = {
"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
NULL
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
static const unsigned int divisor[] = {
@@ -3,7 +3,7 @@
#include <linux/module.h>
#define for_each_test(i, test) \
for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
for (i = 0; i < ARRAY_SIZE(test); i++)
struct test_fail {
const char *str;
@@ -66,7 +66,7 @@ struct bpf_test {
const char *descr;
union {
struct sock_filter insns[MAX_INSNS];
struct sock_filter_int insns_int[MAX_INSNS];
struct bpf_insn insns_int[MAX_INSNS];
} u;
__u8 aux;
__u8 data[MAX_DATA];
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
return len + 1;
}
static struct sk_filter *generate_filter(int which, int *err)
static struct bpf_prog *generate_filter(int which, int *err)
{
struct sk_filter *fp;
struct bpf_prog *fp;
struct sock_fprog_kern fprog;
unsigned int flen = probe_filter_length(tests[which].u.insns);
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
fprog.filter = tests[which].u.insns;
fprog.len = flen;
*err = sk_unattached_filter_create(&fp, &fprog);
*err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
if (*err == -EINVAL) {
pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
break;
case INTERNAL:
fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM;
@@ -1807,9 +1807,9 @@ static struct sk_filter *generate_filter(int which, int *err)
fp->len = flen;
memcpy(fp->insnsi, tests[which].u.insns_int,
fp->len * sizeof(struct sock_filter_int));
fp->len * sizeof(struct bpf_insn));
sk_filter_select_runtime(fp);
bpf_prog_select_runtime(fp);
break;
}
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
return fp;
}
static void release_filter(struct sk_filter *fp, int which)
static void release_filter(struct bpf_prog *fp, int which)
{
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
switch (test_type) {
case CLASSIC:
sk_unattached_filter_destroy(fp);
bpf_prog_destroy(fp);
break;
case INTERNAL:
sk_filter_free(fp);
bpf_prog_free(fp);
break;
}
}
static int __run_one(const struct sk_filter *fp, const void *data,
static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration)
{
u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
start = ktime_to_us(ktime_get());
for (i = 0; i < runs; i++)
ret = SK_RUN_FILTER(fp, data);
ret = BPF_PROG_RUN(fp, data);
finish = ktime_to_us(ktime_get());
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
return ret;
}
static int run_one(const struct sk_filter *fp, struct bpf_test *test)
static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
int i, err_cnt = 0, pass_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct sk_filter *fp;
struct bpf_prog *fp;
int err;
pr_info("#%d %s ", i, tests[i].descr);
lib/test_firmware.c Normal file
@@ -0,0 +1,117 @@
/*
* This module provides an interface to trigger and test firmware loading.
*
* It is designed to be used for basic evaluation of the firmware loading
* subsystem (for example when validating firmware verification). It lacks
* any extra dependencies, and will not normally be loaded by the system
* unless explicitly requested by name.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
{
ssize_t rc = 0;
mutex_lock(&test_fw_mutex);
if (test_firmware)
rc = simple_read_from_buffer(buf, size, offset,
test_firmware->data,
test_firmware->size);
mutex_unlock(&test_fw_mutex);
return rc;
}
static const struct file_operations test_fw_fops = {
.owner = THIS_MODULE,
.read = test_fw_misc_read,
};
static struct miscdevice test_fw_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "test_firmware",
.fops = &test_fw_fops,
};
static ssize_t trigger_request_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
char *name;
name = kzalloc(count + 1, GFP_KERNEL);
if (!name)
return -ENOSPC;
memcpy(name, buf, count);
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
test_firmware = NULL;
rc = request_firmware(&test_firmware, name, dev);
if (rc)
pr_info("load of '%s' failed: %d\n", name, rc);
pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
mutex_unlock(&test_fw_mutex);
kfree(name);
return count;
}
static DEVICE_ATTR_WO(trigger_request);
static int __init test_firmware_init(void)
{
int rc;
rc = misc_register(&test_fw_misc_device);
if (rc) {
pr_err("could not register misc device: %d\n", rc);
return rc;
}
rc = device_create_file(test_fw_misc_device.this_device,
&dev_attr_trigger_request);
if (rc) {
pr_err("could not create sysfs interface: %d\n", rc);
goto dereg;
}
pr_warn("interface ready\n");
return 0;
dereg:
misc_deregister(&test_fw_misc_device);
return rc;
}
module_init(test_firmware_init);
static void __exit test_firmware_exit(void)
{
release_firmware(test_firmware);
device_remove_file(test_fw_misc_device.this_device,
&dev_attr_trigger_request);
misc_deregister(&test_fw_misc_device);
pr_warn("removed interface\n");
}
module_exit(test_firmware_exit);
MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");
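A userspace sketch of driving the module (the sysfs path is an assumption derived from the misc device name above; adjust it to wherever the attribute appears on your system):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Ask the kernel to load a firmware blob through test_firmware. */
int trigger_test_fw(const char *fw_name)
{
	/* assumed path, based on the "test_firmware" misc device */
	int fd = open("/sys/devices/virtual/misc/test_firmware/trigger_request",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, fw_name, strlen(fw_name)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}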
@@ -249,52 +249,6 @@ int zlib_deflateInit2(
return zlib_deflateReset(strm);
}
/* ========================================================================= */
#if 0
int zlib_deflateSetDictionary(
z_streamp strm,
const Byte *dictionary,
uInt dictLength
)
{
deflate_state *s;
uInt length = dictLength;
uInt n;
IPos hash_head = 0;
if (strm == NULL || strm->state == NULL || dictionary == NULL)
return Z_STREAM_ERROR;
s = (deflate_state *) strm->state;
if (s->status != INIT_STATE) return Z_STREAM_ERROR;
strm->adler = zlib_adler32(strm->adler, dictionary, dictLength);
if (length < MIN_MATCH) return Z_OK;
if (length > MAX_DIST(s)) {
length = MAX_DIST(s);
#ifndef USE_DICT_HEAD
dictionary += dictLength - length; /* use the tail of the dictionary */
#endif
}
memcpy((char *)s->window, dictionary, length);
s->strstart = length;
s->block_start = (long)length;
/* Insert all strings in the hash table (except for the last two bytes).
* s->lookahead stays null, so s->ins_h will be recomputed at the next
* call of fill_window.
*/
s->ins_h = s->window[0];
UPDATE_HASH(s, s->ins_h, s->window[1]);
for (n = 0; n <= length - MIN_MATCH; n++) {
INSERT_STRING(s, n, hash_head);
}
if (hash_head) hash_head = 0; /* to make compiler happy */
return Z_OK;
}
#endif /* 0 */
/* ========================================================================= */
int zlib_deflateReset(
z_streamp strm
@@ -326,45 +280,6 @@ int zlib_deflateReset(
return Z_OK;
}
/* ========================================================================= */
#if 0
int zlib_deflateParams(
z_streamp strm,
int level,
int strategy
)
{
deflate_state *s;
compress_func func;
int err = Z_OK;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
s = (deflate_state *) strm->state;
if (level == Z_DEFAULT_COMPRESSION) {
level = 6;
}
if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
return Z_STREAM_ERROR;
}
func = configuration_table[s->level].func;
if (func != configuration_table[level].func && strm->total_in != 0) {
/* Flush the last buffer: */
err = zlib_deflate(strm, Z_PARTIAL_FLUSH);
}
if (s->level != level) {
s->level = level;
s->max_lazy_match = configuration_table[level].max_lazy;
s->good_match = configuration_table[level].good_length;
s->nice_match = configuration_table[level].nice_length;
s->max_chain_length = configuration_table[level].max_chain;
}
s->strategy = strategy;
return err;
}
#endif /* 0 */
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
* IN assertion: the stream state is correct and there is enough room in
@@ -568,64 +483,6 @@ int zlib_deflateEnd(
return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
/* =========================================================================
* Copy the source state to the destination state.
*/
#if 0
int zlib_deflateCopy (
z_streamp dest,
z_streamp source
)
{
#ifdef MAXSEG_64K
return Z_STREAM_ERROR;
#else
deflate_state *ds;
deflate_state *ss;
ush *overlay;
deflate_workspace *mem;
if (source == NULL || dest == NULL || source->state == NULL) {
return Z_STREAM_ERROR;
}
ss = (deflate_state *) source->state;
*dest = *source;
mem = (deflate_workspace *) dest->workspace;
ds = &(mem->deflate_memory);
dest->state = (struct internal_state *) ds;
*ds = *ss;
ds->strm = dest;
ds->window = (Byte *) mem->window_memory;
ds->prev = (Pos *) mem->prev_memory;
ds->head = (Pos *) mem->head_memory;
overlay = (ush *) mem->overlay_memory;
ds->pending_buf = (uch *) overlay;
memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
ds->l_desc.dyn_tree = ds->dyn_ltree;
ds->d_desc.dyn_tree = ds->dyn_dtree;
ds->bl_desc.dyn_tree = ds->bl_tree;
return Z_OK;
#endif
}
#endif /* 0 */
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
* and total number of bytes read. All deflate() input goes through
@@ -45,21 +45,6 @@ int zlib_inflateReset(z_streamp strm)
return Z_OK;
}
#if 0
int zlib_inflatePrime(z_streamp strm, int bits, int value)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
value &= (1L << bits) - 1;
state->hold += value << state->bits;
state->bits += bits;
return Z_OK;
}
#endif
int zlib_inflateInit2(z_streamp strm, int windowBits)
{
struct inflate_state *state;
@@ -761,123 +746,6 @@ int zlib_inflateEnd(z_streamp strm)
return Z_OK;
}
#if 0
int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary,
uInt dictLength)
{
struct inflate_state *state;
unsigned long id;
/* check state */
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->wrap != 0 && state->mode != DICT)
return Z_STREAM_ERROR;
/* check for correct dictionary id */
if (state->mode == DICT) {
id = zlib_adler32(0L, NULL, 0);
id = zlib_adler32(id, dictionary, dictLength);
if (id != state->check)
return Z_DATA_ERROR;
}
/* copy dictionary to window */
zlib_updatewindow(strm, strm->avail_out);
if (dictLength > state->wsize) {
memcpy(state->window, dictionary + dictLength - state->wsize,
state->wsize);
state->whave = state->wsize;
}
else {
memcpy(state->window + state->wsize - dictLength, dictionary,
dictLength);
state->whave = dictLength;
}
state->havedict = 1;
return Z_OK;
}
#endif
#if 0
/*
Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
or when out of input. When called, *have is the number of pattern bytes
found in order so far, in 0..3. On return *have is updated to the new
state. If on return *have equals four, then the pattern was found and the
return value is how many bytes were read including the last byte of the
pattern. If *have is less than four, then the pattern has not been found
yet and the return value is len. In the latter case, zlib_syncsearch() can be
called again with more data and the *have state. *have is initialized to
zero for the first call.
*/
static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf,
unsigned len)
{
unsigned got;
unsigned next;
got = *have;
next = 0;
while (next < len && got < 4) {
if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
got++;
else if (buf[next])
got = 0;
else
got = 4 - got;
next++;
}
*have = got;
return next;
}
#endif
#if 0
int zlib_inflateSync(z_streamp strm)
{
unsigned len; /* number of bytes to look at or looked at */
unsigned long in, out; /* temporary to save total_in and total_out */
unsigned char buf[4]; /* to restore bit buffer to byte string */
struct inflate_state *state;
/* check parameters */
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
/* if first time, start search in bit buffer */
if (state->mode != SYNC) {
state->mode = SYNC;
state->hold <<= state->bits & 7;
state->bits -= state->bits & 7;
len = 0;
while (state->bits >= 8) {
buf[len++] = (unsigned char)(state->hold);
state->hold >>= 8;
state->bits -= 8;
}
state->have = 0;
zlib_syncsearch(&(state->have), buf, len);
}
/* search available input */
len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in);
strm->avail_in -= len;
strm->next_in += len;
strm->total_in += len;
/* return no joy or set up to restart inflate() on a new block */
if (state->have != 4) return Z_DATA_ERROR;
in = strm->total_in; out = strm->total_out;
zlib_inflateReset(strm);
strm->total_in = in; strm->total_out = out;
state->mode = TYPE;
return Z_OK;
}
#endif
/*
* This subroutine adds the data at next_in/avail_in to the output history
* without performing any output. The output buffer must be "caught up";