Merge branch 'linus/master' into rdma.git for-next
rdma.git merge resolution for the 4.19 merge window

Conflicts:
 drivers/infiniband/core/rdma_core.c
  - Use the rdma code and revise with the new spelling for
    atomic_fetch_add_unless
 drivers/nvme/host/rdma.c
  - Replace max_sge with max_send_sge in new blk code
 drivers/nvme/target/rdma.c
  - Use the blk code and revise to use NULL for ib_post_recv when
    appropriate
  - Replace max_sge with max_recv_sge in new blk code
 net/rds/ib_send.c
  - Use the net code and revise to use NULL for ib_post_recv when
    appropriate

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
lib/Kconfig.debug
@@ -1,3 +1,5 @@
menu "Kernel hacking"

menu "printk and dmesg options"

config PRINTK_TIME
@@ -30,6 +32,17 @@ config CONSOLE_LOGLEVEL_DEFAULT
      usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
      option.

config CONSOLE_LOGLEVEL_QUIET
    int "quiet console loglevel (1-15)"
    range 1 15
    default "4"
    help
      loglevel to use when "quiet" is passed on the kernel commandline.

      When "quiet" is passed on the kernel commandline this loglevel
      will be used as the loglevel. IOW passing "quiet" will be the
      equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"

config MESSAGE_LOGLEVEL_DEFAULT
    int "Default message log level (1-7)"
    range 1 7
@@ -1193,6 +1206,7 @@ config DEBUG_ATOMIC_SLEEP
    bool "Sleep inside atomic section checking"
    select PREEMPT_COUNT
    depends on DEBUG_KERNEL
    depends on !ARCH_NO_PREEMPT
    help
      If you say Y here, various routines which may sleep will become very
      noisy if they are called inside atomic sections: when a spinlock is
@@ -1718,7 +1732,7 @@ config KPROBES_SANITY_TEST
    default n
    help
      This option provides for testing basic kprobes functionality on
      boot. A sample kprobe, jprobe and kretprobe are inserted and
      boot. Samples of kprobe and kretprobe are inserted and
      verified for functionality.

      Say N if you are unsure.
@@ -1802,6 +1816,13 @@ config TEST_BITMAP

      If unsure, say N.

config TEST_BITFIELD
    tristate "Test bitfield functions at runtime"
    help
      Enable this option to test the bitfield functions at boot.

      If unsure, say N.

config TEST_UUID
    tristate "Test functions located in the uuid module at runtime"

@@ -2034,3 +2055,7 @@ config IO_STRICT_DEVMEM
      if the driver using a given range cannot be disabled.

      If in doubt, say Y.

source "arch/$(SRCARCH)/Kconfig.debug"

endmenu # Kernel hacking
lib/Makefile
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
     bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
     gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
     bsearch.o find_bit.o llist.o memweight.o kfifo.o \
     percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
     percpu-refcount.o rhashtable.o reciprocal_div.o \
     once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
@@ -65,6 +65,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);
    int ret = 0;
    long long val;

    raw_spin_lock_irqsave(lock, flags);
    if (v->counter != u) {
    val = v->counter;
    if (val != u)
        v->counter += a;
        ret = 1;
    }
    raw_spin_unlock_irqrestore(lock, flags);
    return ret;

    return val;
}
EXPORT_SYMBOL(atomic64_add_unless);
EXPORT_SYMBOL(atomic64_fetch_add_unless);
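The hunk above changes the calling convention, not just the name: atomic64_add_unless() returned 0/1, while atomic64_fetch_add_unless() returns the counter's old value (the add happened iff that value != u). A minimal caller-migration sketch, assuming only the 4.19 API; get_ref() is a made-up example, not code from this commit:

static bool get_ref(atomic64_t *refs)
{
    /* old form: return atomic64_add_unless(refs, 1, 0); */
    return atomic64_fetch_add_unless(refs, 1, 0) != 0;
}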
lib/bch.c (23 lines changed)
@@ -78,15 +78,22 @@
#define GF_M(_p) (CONFIG_BCH_CONST_M)
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
#define BCH_MAX_M (CONFIG_BCH_CONST_M)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
#define BCH_MAX_M 15
#endif

#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)

#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)

#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
#define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)

#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
#endif
@@ -187,7 +194,8 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
    const unsigned int l = BCH_ECC_WORDS(bch)-1;
    unsigned int i, mlen;
    unsigned long m;
    uint32_t w, r[l+1];
    uint32_t w, r[BCH_ECC_MAX_WORDS];
    const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r);
    const uint32_t * const tab0 = bch->mod8_tab;
    const uint32_t * const tab1 = tab0 + 256*(l+1);
    const uint32_t * const tab2 = tab1 + 256*(l+1);
@@ -198,7 +206,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
        /* load ecc parity bytes into internal 32-bit buffer */
        load_ecc8(bch, bch->ecc_buf, ecc);
    } else {
        memset(bch->ecc_buf, 0, sizeof(r));
        memset(bch->ecc_buf, 0, r_bytes);
    }

    /* process first unaligned data bytes */
@@ -215,7 +223,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
    mlen = len/4;
    data += 4*mlen;
    len -= 4*mlen;
    memcpy(r, bch->ecc_buf, sizeof(r));
    memcpy(r, bch->ecc_buf, r_bytes);

    /*
     * split each 32-bit word into 4 polynomials of weight 8 as follows:
@@ -241,7 +249,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,

        r[l] = p0[l]^p1[l]^p2[l]^p3[l];
    }
    memcpy(bch->ecc_buf, r, sizeof(r));
    memcpy(bch->ecc_buf, r, r_bytes);

    /* process last unaligned bytes */
    if (len)
@@ -434,7 +442,7 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
{
    const int m = GF_M(bch);
    unsigned int tmp, mask;
    int rem, c, r, p, k, param[m];
    int rem, c, r, p, k, param[BCH_MAX_M];

    k = 0;
    mask = 1 << m;
@@ -1114,7 +1122,7 @@ static int build_deg2_base(struct bch_control *bch)
{
    const int m = GF_M(bch);
    int i, j, r;
    unsigned int sum, x, y, remaining, ak = 0, xi[m];
    unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M];

    /* find k s.t. Tr(a^k) = 1 and 0 <= k < m */
    for (i = 0; i < m; i++) {
@@ -1254,7 +1262,6 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
    struct bch_control *bch = NULL;

    const int min_m = 5;
    const int max_m = 15;

    /* default primitive polynomials */
    static const unsigned int prim_poly_tab[] = {
@@ -1270,7 +1277,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
        goto fail;
    }
#endif
    if ((m < min_m) || (m > max_m))
    if ((m < min_m) || (m > BCH_MAX_M))
    /*
     * values of m greater than 15 are not currently supported;
     * supporting m > 15 would require changing table base type
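The point of BCH_MAX_M/BCH_MAX_T above is to replace variable-length arrays (r[l+1], param[m], xi[m]) with worst-case fixed bounds so the file builds without VLAs. A tiny userspace check of those bounds (constants copied from the hunk; the printed values are computed here, not quoted from the source):

#include <stdio.h>

#define BCH_MAX_M 15
#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    printf("BCH_MAX_T         = %d\n", BCH_MAX_T);                               /* 2184 */
    printf("BCH_ECC_MAX_WORDS = %d\n", DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)); /* 1024 */
    printf("BCH_ECC_MAX_BYTES = %d\n", DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8));  /* 4095 */
    return 0;
}

This is also why the lib/Makefile hunk earlier adds -Wframe-larger-than=4500 for bch.o: the on-stack r[] buffer is now ~4 KiB in the worst case.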
lib/crc32.c (11 lines changed)
@@ -27,6 +27,7 @@
/* see: Documentation/crc32.txt for a description of algorithms */

#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -184,7 +185,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
    return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
    return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -194,7 +195,7 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
    return crc32_le_generic(crc, p, len,
            (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
            (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -268,7 +269,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,

u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
    return crc32_generic_shift(crc, len, CRCPOLY_LE);
    return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}

u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +331,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
    return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
    return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
    return crc32_be_generic(crc, p, len,
            (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
            (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
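The CRCPOLY_LE/CRCPOLY_BE to CRC32_POLY_LE/CRC32_POLY_BE rename moves the constants into <linux/crc32poly.h> so every user below (bunzip2, gen_crc32table, xz) shares one definition. The two values are the same Ethernet polynomial; the LE form is simply the bit-reversed BE form, which a quick userspace check confirms (bitrev32 here is a naive stand-in for the kernel helper of the same name):

#include <stdint.h>
#include <stdio.h>

static uint32_t bitrev32(uint32_t v)
{
    uint32_t r = 0;

    for (int i = 0; i < 32; i++)
        r = (r << 1) | ((v >> i) & 1);
    return r;
}

int main(void)
{
    printf("%08x\n", bitrev32(0x04c11db7)); /* prints edb88320 */
    return 0;
}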
lib/crc32defs.h
@@ -1,18 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * There are multiple 16-bit CRC polynomials in common use, but this is
 * *the* standard CRC-32 polynomial, first popularized by Ethernet.
 * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
 */
#define CRCPOLY_LE 0xedb88320
#define CRCPOLY_BE 0x04c11db7

/*
 * This is the CRC32c polynomial, as outlined by Castagnoli.
 * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
 * x^8+x^6+x^0
 */
#define CRC32C_POLY_LE 0x82F63B78

/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
lib/debugobjects.c
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)

    limit++;
    if (is_on_stack)
        pr_warn("object is on stack, but not annotated\n");
        pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
            task_stack_page(current));
    else
        pr_warn("object is not on stack, but annotated\n");
        pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
            task_stack_page(current));

    WARN_ON(1);
}

@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)

    if (!obj_cache || debug_objects_replace_static_objects()) {
        debug_objects_enabled = 0;
        if (obj_cache)
            kmem_cache_destroy(obj_cache);
        kmem_cache_destroy(obj_cache);
        pr_warn("out of memory.\n");
    } else
        debug_objects_selftest();
lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
#endif /* STATIC */

#include <linux/decompress/mm.h>
#include <linux/crc32poly.h>

#ifndef INT_MAX
#define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
    for (i = 0; i < 256; i++) {
        c = i << 24;
        for (j = 8; j; j--)
            c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
            c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
        bd->crc32Table[i] = c;
    }
lib/gen_crc32table.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
@@ -57,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,

static void crc32init_le(void)
{
    crc32init_le_generic(CRCPOLY_LE, crc32table_le);
    crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}

static void crc32cinit_le(void)
@@ -76,7 +77,7 @@ static void crc32init_be(void)
    crc32table_be[0][0] = 0;

    for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
        crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
        crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
        for (j = 0; j < i; j++)
            crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
    }
lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
    if (ioremap_pmd_enabled() &&
        ((next - addr) == PMD_SIZE) &&
        IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
        pmd_free_pte_page(pmd)) {
        pmd_free_pte_page(pmd, addr)) {
        if (pmd_set_huge(pmd, phys_addr + addr, prot))
            continue;
    }
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
    if (ioremap_pud_enabled() &&
        ((next - addr) == PUD_SIZE) &&
        IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
        pud_free_pmd_page(pud)) {
        pud_free_pmd_page(pud, addr)) {
        if (pud_set_huge(pud, phys_addr + addr, prot))
            continue;
    }
lib/klist.c (10 lines changed)
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
    void (*put)(struct klist_node *) = i->i_klist->put;
    struct klist_node *last = i->i_cur;
    struct klist_node *prev;
    unsigned long flags;

    spin_lock(&i->i_klist->k_lock);
    spin_lock_irqsave(&i->i_klist->k_lock, flags);

    if (last) {
        prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
        prev = to_klist_node(prev->n_node.prev);
    }

    spin_unlock(&i->i_klist->k_lock);
    spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

    if (put && last)
        put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
    void (*put)(struct klist_node *) = i->i_klist->put;
    struct klist_node *last = i->i_cur;
    struct klist_node *next;
    unsigned long flags;

    spin_lock(&i->i_klist->k_lock);
    spin_lock_irqsave(&i->i_klist->k_lock, flags);

    if (last) {
        next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
        next = to_klist_node(next->n_node.next);
    }

    spin_unlock(&i->i_klist->k_lock);
    spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

    if (put && last)
        put(last);
lib/kobject.c
@@ -35,6 +35,25 @@ const void *kobject_namespace(struct kobject *kobj)
    return kobj->ktype->namespace(kobj);
}

/**
 * kobject_get_ownership - get sysfs ownership data for @kobj
 * @kobj: kobject in question
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns initial uid/gid pair that should be used when creating sysfs
 * representation of given kobject. Normally used to adjust ownership of
 * objects in a container.
 */
void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
    *uid = GLOBAL_ROOT_UID;
    *gid = GLOBAL_ROOT_GID;

    if (kobj->ktype->get_ownership)
        kobj->ktype->get_ownership(kobj, uid, gid);
}

/*
 * populate_dir - populate directory with attributes.
 * @kobj: object we're working on.
@@ -868,9 +887,16 @@ static void kset_release(struct kobject *kobj)
    kfree(kset);
}

void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
    if (kobj->parent)
        kobject_get_ownership(kobj->parent, uid, gid);
}

static struct kobj_type kset_ktype = {
    .sysfs_ops = &kobj_sysfs_ops,
    .release = kset_release,
    .release = kset_release,
    .get_ownership = kset_get_ownership,
};

/**
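The new kobject_get_ownership() hook defaults to root/root and lets a ktype override it; kset_get_ownership() above simply inherits from the parent. A sketch of how a driver-side ktype might wire this up (the demo_* names are hypothetical, not from this commit):

static void demo_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
    /* Inherit sysfs uid/gid from the parent, as kset_ktype now does */
    if (kobj->parent)
        kobject_get_ownership(kobj->parent, uid, gid);
}

static struct kobj_type demo_ktype = {
    .sysfs_ops     = &kobj_sysfs_ops,
    .get_ownership = demo_get_ownership,
};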
lib/locking-selftest.c
@@ -29,7 +29,7 @@
 */
static unsigned int debug_locks_verbose;

static DEFINE_WW_CLASS(ww_lockdep);
static DEFINE_WD_CLASS(ww_lockdep);

static int __init setup_debug_locks_verbose(char *str)
{
lib/mpi/mpi-pow.c
@@ -41,7 +41,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
    mpi_ptr_t tspace = NULL;
    mpi_ptr_t rp, ep, mp, bp;
    mpi_size_t esize, msize, bsize, rsize;
    int esign, msign, bsign, rsign;
    int msign, bsign, rsign;
    mpi_size_t size;
    int mod_shift_cnt;
    int negative_result;
@@ -53,7 +53,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
    esize = exp->nlimbs;
    msize = mod->nlimbs;
    size = 2 * msize;
    esign = exp->sign;
    msign = mod->sign;

    rp = res->d;
lib/nlattr.c
@@ -253,8 +253,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
        if (policy) {
            err = validate_nla(nla, maxtype, policy);
            if (err < 0) {
                if (extack)
                    extack->bad_attr = nla;
                NL_SET_ERR_MSG_ATTR(extack, nla,
                            "Attribute failed policy validation");
                goto errout;
            }
        }
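The nlattr hunk swaps an open-coded extack assignment for NL_SET_ERR_MSG_ATTR(), which records both the offending attribute and a human-readable message for the caller. Roughly what the macro does (a sketch, not the exact definition in include/linux/netlink.h):

if (extack) {
    extack->_msg = "Attribute failed policy validation";
    extack->bad_attr = nla;
}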
lib/nmi_backtrace.c
@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
    static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
    int cpu = smp_processor_id();

    if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
        arch_spin_lock(&lock);
        if (regs && cpu_in_idle(instruction_pointer(regs))) {
            pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
                cpu, (void *)instruction_pointer(regs));
@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
        else
            dump_stack();
        }
        arch_spin_unlock(&lock);
        cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
        return true;
    }
lib/percpu_ida.c (deleted, 370 lines)
@@ -1,370 +0,0 @@
/*
 * Percpu IDA library
 *
 * Copyright (C) 2013 Datera, Inc. Kent Overstreet
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>

struct percpu_ida_cpu {
    /*
     * Even though this is percpu, we need a lock for tag stealing by remote
     * CPUs:
     */
    spinlock_t lock;

    /* nr_free/freelist form a stack of free IDs */
    unsigned nr_free;
    unsigned freelist[];
};

static inline void move_tags(unsigned *dst, unsigned *dst_nr,
                 unsigned *src, unsigned *src_nr,
                 unsigned nr)
{
    *src_nr -= nr;
    memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
    *dst_nr += nr;
}

/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
                  struct percpu_ida_cpu *tags)
{
    unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
    struct percpu_ida_cpu *remote;

    for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
         cpus_have_tags; cpus_have_tags--) {
        cpu = cpumask_next(cpu, &pool->cpus_have_tags);

        if (cpu >= nr_cpu_ids) {
            cpu = cpumask_first(&pool->cpus_have_tags);
            if (cpu >= nr_cpu_ids)
                BUG();
        }

        pool->cpu_last_stolen = cpu;
        remote = per_cpu_ptr(pool->tag_cpu, cpu);

        cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

        if (remote == tags)
            continue;

        spin_lock(&remote->lock);

        if (remote->nr_free) {
            memcpy(tags->freelist,
                   remote->freelist,
                   sizeof(unsigned) * remote->nr_free);

            tags->nr_free = remote->nr_free;
            remote->nr_free = 0;
        }

        spin_unlock(&remote->lock);

        if (tags->nr_free)
            break;
    }
}

/*
 * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
 * our percpu freelist:
 */
static inline void alloc_global_tags(struct percpu_ida *pool,
                     struct percpu_ida_cpu *tags)
{
    move_tags(tags->freelist, &tags->nr_free,
          pool->freelist, &pool->nr_free,
          min(pool->nr_free, pool->percpu_batch_size));
}

/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
 *
 * @gfp indicates whether or not to wait until a free id is available (it's not
 * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
 * however long it takes until another thread frees an id (same semantics as a
 * mempool).
 *
 * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
    DEFINE_WAIT(wait);
    struct percpu_ida_cpu *tags;
    unsigned long flags;
    int tag = -ENOSPC;

    tags = raw_cpu_ptr(pool->tag_cpu);
    spin_lock_irqsave(&tags->lock, flags);

    /* Fastpath */
    if (likely(tags->nr_free)) {
        tag = tags->freelist[--tags->nr_free];
        spin_unlock_irqrestore(&tags->lock, flags);
        return tag;
    }
    spin_unlock_irqrestore(&tags->lock, flags);

    while (1) {
        spin_lock_irqsave(&pool->lock, flags);
        tags = this_cpu_ptr(pool->tag_cpu);

        /*
         * prepare_to_wait() must come before steal_tags(), in case
         * percpu_ida_free() on another cpu flips a bit in
         * cpus_have_tags
         *
         * global lock held and irqs disabled, don't need percpu lock
         */
        if (state != TASK_RUNNING)
            prepare_to_wait(&pool->wait, &wait, state);

        if (!tags->nr_free)
            alloc_global_tags(pool, tags);
        if (!tags->nr_free)
            steal_tags(pool, tags);

        if (tags->nr_free) {
            tag = tags->freelist[--tags->nr_free];
            if (tags->nr_free)
                cpumask_set_cpu(smp_processor_id(),
                        &pool->cpus_have_tags);
        }

        spin_unlock_irqrestore(&pool->lock, flags);

        if (tag >= 0 || state == TASK_RUNNING)
            break;

        if (signal_pending_state(state, current)) {
            tag = -ERESTARTSYS;
            break;
        }

        schedule();
    }
    if (state != TASK_RUNNING)
        finish_wait(&pool->wait, &wait);

    return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);

/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
    struct percpu_ida_cpu *tags;
    unsigned long flags;
    unsigned nr_free;

    BUG_ON(tag >= pool->nr_tags);

    tags = raw_cpu_ptr(pool->tag_cpu);

    spin_lock_irqsave(&tags->lock, flags);
    tags->freelist[tags->nr_free++] = tag;

    nr_free = tags->nr_free;

    if (nr_free == 1) {
        cpumask_set_cpu(smp_processor_id(),
                &pool->cpus_have_tags);
        wake_up(&pool->wait);
    }
    spin_unlock_irqrestore(&tags->lock, flags);

    if (nr_free == pool->percpu_max_size) {
        spin_lock_irqsave(&pool->lock, flags);
        spin_lock(&tags->lock);

        if (tags->nr_free == pool->percpu_max_size) {
            move_tags(pool->freelist, &pool->nr_free,
                  tags->freelist, &tags->nr_free,
                  pool->percpu_batch_size);

            wake_up(&pool->wait);
        }
        spin_unlock(&tags->lock);
        spin_unlock_irqrestore(&pool->lock, flags);
    }
}
EXPORT_SYMBOL_GPL(percpu_ida_free);

/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
    free_percpu(pool->tag_cpu);
    free_pages((unsigned long) pool->freelist,
           get_order(pool->nr_tags * sizeof(unsigned)));
}
EXPORT_SYMBOL_GPL(percpu_ida_destroy);

/**
 * percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
              unsigned long max_size, unsigned long batch_size)
{
    unsigned i, cpu, order;

    memset(pool, 0, sizeof(*pool));

    init_waitqueue_head(&pool->wait);
    spin_lock_init(&pool->lock);
    pool->nr_tags = nr_tags;
    pool->percpu_max_size = max_size;
    pool->percpu_batch_size = batch_size;

    /* Guard against overflow */
    if (nr_tags > (unsigned) INT_MAX + 1) {
        pr_err("percpu_ida_init(): nr_tags too large\n");
        return -EINVAL;
    }

    order = get_order(nr_tags * sizeof(unsigned));
    pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
    if (!pool->freelist)
        return -ENOMEM;

    for (i = 0; i < nr_tags; i++)
        pool->freelist[i] = i;

    pool->nr_free = nr_tags;

    pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
                       pool->percpu_max_size * sizeof(unsigned),
                       sizeof(unsigned));
    if (!pool->tag_cpu)
        goto err;

    for_each_possible_cpu(cpu)
        spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

    return 0;
err:
    percpu_ida_destroy(pool);
    return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__percpu_ida_init);

/**
 * percpu_ida_for_each_free - iterate free ids of a pool
 * @pool: pool to iterate
 * @fn: interate callback function
 * @data: parameter for @fn
 *
 * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
 * ids might be missed, some might be iterated duplicated, and some might
 * be iterated and not free soon.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
                 void *data)
{
    unsigned long flags;
    struct percpu_ida_cpu *remote;
    unsigned cpu, i, err = 0;

    for_each_possible_cpu(cpu) {
        remote = per_cpu_ptr(pool->tag_cpu, cpu);
        spin_lock_irqsave(&remote->lock, flags);
        for (i = 0; i < remote->nr_free; i++) {
            err = fn(remote->freelist[i], data);
            if (err)
                break;
        }
        spin_unlock_irqrestore(&remote->lock, flags);
        if (err)
            goto out;
    }

    spin_lock_irqsave(&pool->lock, flags);
    for (i = 0; i < pool->nr_free; i++) {
        err = fn(pool->freelist[i], data);
        if (err)
            break;
    }
    spin_unlock_irqrestore(&pool->lock, flags);
out:
    return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);

/**
 * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
 * @pool: pool related
 * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
 *
 * Note: this just returns a snapshot of free tags number.
 */
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
{
    struct percpu_ida_cpu *remote;
    if (cpu == nr_cpu_ids)
        return pool->nr_free;
    remote = per_cpu_ptr(pool->tag_cpu, cpu);
    return remote->nr_free;
}
EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
lib/raid6/s390vx.uc
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
    asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}

static inline void LOAD_DATA(int x, int n, u8 *ptr)
static inline void LOAD_DATA(int x, u8 *ptr)
{
    typedef struct { u8 _[16*n]; } addrtype;
    typedef struct { u8 _[16 * $#]; } addrtype;
    register addrtype *__ptr asm("1") = (addrtype *) ptr;

    asm volatile ("VLM %2,%3,0,%r1"
            : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
            : : "m" (*__ptr), "a" (__ptr), "i" (x),
                "i" (x + $# - 1));
}

static inline void STORE_DATA(int x, int n, u8 *ptr)
static inline void STORE_DATA(int x, u8 *ptr)
{
    typedef struct { u8 _[16*n]; } addrtype;
    typedef struct { u8 _[16 * $#]; } addrtype;
    register addrtype *__ptr asm("1") = (addrtype *) ptr;

    asm volatile ("VSTM %2,%3,0,1"
            : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
            : "=m" (*__ptr) : "a" (__ptr), "i" (x),
                "i" (x + $# - 1));
}

static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
    q = dptr[z0 + 2]; /* RS syndrome */

    for (d = 0; d < bytes; d += $#*NSIZE) {
        LOAD_DATA(0,$#,&dptr[z0][d]);
        LOAD_DATA(0,&dptr[z0][d]);
        COPY_VEC(8+$$,0+$$);
        for (z = z0 - 1; z >= 0; z--) {
            MASK(16+$$,8+$$);
            AND(16+$$,16+$$,25);
            SHLBYTE(8+$$,8+$$);
            XOR(8+$$,8+$$,16+$$);
            LOAD_DATA(16,$#,&dptr[z][d]);
            LOAD_DATA(16,&dptr[z][d]);
            XOR(0+$$,0+$$,16+$$);
            XOR(8+$$,8+$$,16+$$);
        }
        STORE_DATA(0,$#,&p[d]);
        STORE_DATA(8,$#,&q[d]);
        STORE_DATA(0,&p[d]);
        STORE_DATA(8,&q[d]);
    }
    kernel_fpu_end(&vxstate, KERNEL_VXR);
}
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,

    for (d = 0; d < bytes; d += $#*NSIZE) {
        /* P/Q data pages */
        LOAD_DATA(0,$#,&dptr[z0][d]);
        LOAD_DATA(0,&dptr[z0][d]);
        COPY_VEC(8+$$,0+$$);
        for (z = z0 - 1; z >= start; z--) {
            MASK(16+$$,8+$$);
            AND(16+$$,16+$$,25);
            SHLBYTE(8+$$,8+$$);
            XOR(8+$$,8+$$,16+$$);
            LOAD_DATA(16,$#,&dptr[z][d]);
            LOAD_DATA(16,&dptr[z][d]);
            XOR(0+$$,0+$$,16+$$);
            XOR(8+$$,8+$$,16+$$);
        }
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
            SHLBYTE(8+$$,8+$$);
            XOR(8+$$,8+$$,16+$$);
        }
        LOAD_DATA(16,$#,&p[d]);
        LOAD_DATA(16,&p[d]);
        XOR(16+$$,16+$$,0+$$);
        STORE_DATA(16,$#,&p[d]);
        LOAD_DATA(16,$#,&q[d]);
        STORE_DATA(16,&p[d]);
        LOAD_DATA(16,&q[d]);
        XOR(16+$$,16+$$,8+$$);
        STORE_DATA(16,$#,&q[d]);
        STORE_DATA(16,&q[d]);
    }
    kernel_fpu_end(&vxstate, KERNEL_VXR);
}
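A note for readers of the two .uc hunks above: s390vx.uc is a template, not plain C. The $# and $$ tokens are expanded by the unroller script in lib/raid6 (roughly: $# becomes the unroll factor and $$ the per-copy index), so dropping the explicit n parameter from LOAD_DATA()/STORE_DATA() and sizing addrtype with 16 * $# keeps the size a compile-time constant in each generated variant.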
lib/reciprocal_div.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
@@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d)
    return R;
}
EXPORT_SYMBOL(reciprocal_value);

struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
{
    struct reciprocal_value_adv R;
    u32 l, post_shift;
    u64 mhigh, mlow;

    /* ceil(log2(d)) */
    l = fls(d - 1);
    /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
     * be handled before calling "reciprocal_value_adv", please see the
     * comment at include/linux/reciprocal_div.h.
     */
    WARN(l == 32,
         "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
         d, __func__);
    post_shift = l;
    mlow = 1ULL << (32 + l);
    do_div(mlow, d);
    mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
    do_div(mhigh, d);

    for (; post_shift > 0; post_shift--) {
        u64 lo = mlow >> 1, hi = mhigh >> 1;

        if (lo >= hi)
            break;

        mlow = lo;
        mhigh = hi;
    }

    R.m = (u32)mhigh;
    R.sh = post_shift;
    R.exp = l;
    R.is_wide_m = mhigh > U32_MAX;

    return R;
}
EXPORT_SYMBOL(reciprocal_value_adv);
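reciprocal_value_adv() finds a multiplier m and shift sh so that n / d == (m * n) >> (32 + sh) for all u32 n (when is_wide_m is false). A userspace re-creation of the same steps, checking one sample divisor (a sketch assuming 1 < d and ceil(log2(d)) < 32):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t d = 1000000, prec = 32;
    uint32_t l = 32 - __builtin_clz(d - 1);        /* ceil(log2(d)) */
    uint32_t post_shift = l;
    uint64_t mlow = (1ULL << (32 + l)) / d;
    uint64_t mhigh = ((1ULL << (32 + l)) + (1ULL << (32 + l - prec))) / d;

    for (; post_shift > 0; post_shift--) {
        uint64_t lo = mlow >> 1, hi = mhigh >> 1;

        if (lo >= hi)
            break;
        mlow = lo;
        mhigh = hi;
    }

    uint32_t n = 3123456789u;
    uint32_t q = (uint32_t)(((mhigh * n) >> 32) >> post_shift);

    printf("%u %u\n", q, n / d);    /* prints "3123 3123" */
    return 0;
}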
lib/reed_solomon/reed_solomon.c
@@ -283,7 +283,7 @@ out:
 * in index form
 * @prim: primitive element to generate polynomial roots
 * @nroots: RS code generator polynomial degree (number of roots)
 * @gfp: GFP_ flags for allocations
 * @gfp: Memory allocation flags.
 */
struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
                   int nroots, gfp_t gfp)
lib/refcount.c
@@ -35,13 +35,13 @@
 *
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#ifdef CONFIG_REFCOUNT_FULL

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
@@ -58,7 +58,7 @@
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
    unsigned int new, val = atomic_read(&r->refs);

@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)

    return true;
}
EXPORT_SYMBOL(refcount_add_not_zero);
EXPORT_SYMBOL(refcount_add_not_zero_checked);

/**
 * refcount_add - add a value to a refcount
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
void refcount_add_checked(unsigned int i, refcount_t *r)
{
    WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
    WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add);
EXPORT_SYMBOL(refcount_add_checked);

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
bool refcount_inc_not_zero_checked(refcount_t *r)
{
    unsigned int new, val = atomic_read(&r->refs);

@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)

    return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero);
EXPORT_SYMBOL(refcount_inc_not_zero_checked);

/**
 * refcount_inc - increment a refcount
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc(refcount_t *r)
void refcount_inc_checked(refcount_t *r)
{
    WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
    WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc);
EXPORT_SYMBOL(refcount_inc_checked);

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
    unsigned int new, val = atomic_read(&r->refs);

@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)

    return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test);
EXPORT_SYMBOL(refcount_sub_and_test_checked);

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
bool refcount_dec_and_test_checked(refcount_t *r)
{
    return refcount_sub_and_test(1, r);
    return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test);
EXPORT_SYMBOL(refcount_dec_and_test_checked);

/**
 * refcount_dec - decrement a refcount
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
void refcount_dec_checked(refcount_t *r)
{
    WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
    WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);
#endif /* CONFIG_REFCOUNT_FULL */
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
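The _checked rename frees the unqualified names for faster arch implementations while keeping these always-saturating versions available. The core idea, reduced to a C11 sketch (demo_add_not_zero is a simplification of refcount_add_not_zero_checked above, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

static bool demo_add_not_zero(atomic_uint *refs, unsigned int i)
{
    unsigned int new, val = atomic_load_explicit(refs, memory_order_relaxed);

    do {
        if (!val)
            return false;       /* 0 means the object may already be freed */
        if (val == UINT_MAX)
            return true;        /* stay saturated, never wrap back to 0 */
        new = val + i;
        if (new < val)
            new = UINT_MAX;     /* saturate on overflow */
    } while (!atomic_compare_exchange_weak(refs, &val, new));

    return true;
}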
lib/rhashtable.c
@@ -28,6 +28,7 @@
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
@@ -115,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)

static union nested_table *nested_table_alloc(struct rhashtable *ht,
                          union nested_table __rcu **prev,
                          unsigned int shifted,
                          unsigned int nhash)
                          bool leaf)
{
    union nested_table *ntbl;
    int i;
@@ -127,10 +127,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,

    ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

    if (ntbl && shifted) {
        for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
            INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
                        (i << shifted) | nhash);
    if (ntbl && leaf) {
        for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
            INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
    }

    rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
        return NULL;

    if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
                0, 0)) {
                false)) {
        kfree(tbl);
        return NULL;
    }
@@ -206,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
    tbl->hash_rnd = get_random_u32();

    for (i = 0; i < nbuckets; i++)
        INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
        INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

    return tbl;
}
@@ -227,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
    struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
    struct bucket_table *new_tbl = rhashtable_last_table(ht,
        rht_dereference_rcu(old_tbl->future_tbl, ht));
    struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
    struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
    int err = -EAGAIN;
    struct rhash_head *head, *next, *entry;
@@ -298,21 +296,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
                    struct bucket_table *old_tbl,
                    struct bucket_table *new_tbl)
{
    /* Protect future_tbl using the first bucket lock. */
    spin_lock_bh(old_tbl->locks);

    /* Did somebody beat us to it? */
    if (rcu_access_pointer(old_tbl->future_tbl)) {
        spin_unlock_bh(old_tbl->locks);
        return -EEXIST;
    }

    /* Make insertions go into the new, empty table right away. Deletions
     * and lookups will be attempted in both tables until we synchronize.
     * As cmpxchg() provides strong barriers, we do not need
     * rcu_assign_pointer().
     */
    rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

    spin_unlock_bh(old_tbl->locks);
    if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
        return -EEXIST;

    return 0;
}
@@ -475,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,

fail:
    /* Do not fail the insert if someone else did a rehash. */
    if (likely(rcu_dereference_raw(tbl->future_tbl)))
    if (likely(rcu_access_pointer(tbl->future_tbl)))
        return 0;

    /* Schedule async rehash to retry allocation in process context. */
@@ -548,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
    if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
        return ERR_CAST(data);

    new_tbl = rcu_dereference(tbl->future_tbl);
    new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
    if (new_tbl)
        return new_tbl;

@@ -607,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
            break;

        spin_unlock_bh(lock);
        tbl = rcu_dereference(tbl->future_tbl);
        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
    }

    data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -1002,7 +993,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
@@ -1034,9 +1024,6 @@ int rhashtable_init(struct rhashtable *ht,
        (params->obj_hashfn && !params->obj_cmpfn))
        return -EINVAL;

    if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
        return -EINVAL;

    memset(ht, 0, sizeof(*ht));
    mutex_init(&ht->mutex);
    spin_lock_init(&ht->lock);
@@ -1100,10 +1087,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
    int err;

    /* No rhlist NULLs marking for now. */
    if (params->nulls_base)
        return -EINVAL;

    err = rhashtable_init(&hlt->ht, params);
    hlt->ht.rhlist = true;
    return err;
@@ -1227,25 +1210,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
    unsigned int index = hash & ((1 << tbl->nest) - 1);
    unsigned int size = tbl->size >> tbl->nest;
    union nested_table *ntbl;
    unsigned int shifted;
    unsigned int nhash;

    ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
    hash >>= tbl->nest;
    nhash = index;
    shifted = tbl->nest;
    ntbl = nested_table_alloc(ht, &ntbl[index].table,
                  size <= (1 << shift) ? shifted : 0, nhash);
                  size <= (1 << shift));

    while (ntbl && size > (1 << shift)) {
        index = hash & ((1 << shift) - 1);
        size >>= shift;
        hash >>= shift;
        nhash |= index << shifted;
        shifted += shift;
        ntbl = nested_table_alloc(ht, &ntbl[index].table,
                      size <= (1 << shift) ? shifted : 0,
                      nhash);
                      size <= (1 << shift));
    }

    if (!ntbl)
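The rhashtable_rehash_attach() hunk is the interesting one: the first-bucket spinlock is gone, and future_tbl is published with a single compare-and-swap, which both detects a concurrent rehash and provides the ordering that rcu_assign_pointer() used to supply. The pattern in isolation (a C11 sketch; the kernel uses cmpxchg()):

#include <stdatomic.h>

struct table {
    _Atomic(struct table *) future_tbl;
    /* ... buckets, locks, etc. ... */
};

static int attach(struct table *old_tbl, struct table *new_tbl)
{
    struct table *expected = NULL;

    /* Publish exactly once; fail if somebody beat us to it. */
    if (!atomic_compare_exchange_strong(&old_tbl->future_tbl,
                                        &expected, new_tbl))
        return -1;      /* the kernel code returns -EEXIST here */
    return 0;
}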
lib/test_bitfield.c (new file, 168 lines)
@@ -0,0 +1,168 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Test cases for bitfield helpers.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitfield.h>

#define CHECK_ENC_GET_U(tp, v, field, res) do { \
        { \
            u##tp _res; \
            \
            _res = u##tp##_encode_bits(v, field); \
            if (_res != res) { \
                pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
                    (u64)_res); \
                return -EINVAL; \
            } \
            if (u##tp##_get_bits(_res, field) != v) \
                return -EINVAL; \
        } \
    } while (0)

#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
        { \
            __le##tp _res; \
            \
            _res = le##tp##_encode_bits(v, field); \
            if (_res != cpu_to_le##tp(res)) { \
                pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
                    (u64)le##tp##_to_cpu(_res), \
                    (u64)(res)); \
                return -EINVAL; \
            } \
            if (le##tp##_get_bits(_res, field) != v) \
                return -EINVAL; \
        } \
    } while (0)

#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
        { \
            __be##tp _res; \
            \
            _res = be##tp##_encode_bits(v, field); \
            if (_res != cpu_to_be##tp(res)) { \
                pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
                    (u64)be##tp##_to_cpu(_res), \
                    (u64)(res)); \
                return -EINVAL; \
            } \
            if (be##tp##_get_bits(_res, field) != v) \
                return -EINVAL; \
        } \
    } while (0)

#define CHECK_ENC_GET(tp, v, field, res) do { \
        CHECK_ENC_GET_U(tp, v, field, res); \
        CHECK_ENC_GET_LE(tp, v, field, res); \
        CHECK_ENC_GET_BE(tp, v, field, res); \
    } while (0)

static int test_constants(void)
{
    /*
     * NOTE
     * This whole function compiles (or at least should, if everything
     * is going according to plan) to nothing after optimisation.
     */

    CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
    CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
    CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
    CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
    CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
    CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);

    CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
    CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
    CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
    CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);

    CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
    CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
    CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
    CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
    CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
    CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);

    CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
    CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
    CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
    CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
    CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
    CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);

    return 0;
}

#define CHECK(tp, mask) do { \
        u64 v; \
        \
        for (v = 0; v < 1 << hweight32(mask); v++) \
            if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
                return -EINVAL; \
    } while (0)

static int test_variables(void)
{
    CHECK(u8, 0x0f);
    CHECK(u8, 0xf0);
    CHECK(u8, 0x38);

    CHECK(u16, 0x0038);
    CHECK(u16, 0x0380);
    CHECK(u16, 0x3800);
    CHECK(u16, 0x8000);

    CHECK(u32, 0x80000000);
    CHECK(u32, 0x7f000000);
    CHECK(u32, 0x07e00000);
    CHECK(u32, 0x00018000);

    CHECK(u64, 0x8000000000000000ull);
    CHECK(u64, 0x7f00000000000000ull);
    CHECK(u64, 0x0001800000000000ull);
    CHECK(u64, 0x0000000080000000ull);
    CHECK(u64, 0x000000007f000000ull);
    CHECK(u64, 0x0000000018000000ull);
    CHECK(u64, 0x0000001f8000000ull);

    return 0;
}

static int __init test_bitfields(void)
{
    int ret = test_constants();

    if (ret) {
        pr_warn("constant tests failed!\n");
        return ret;
    }

    ret = test_variables();
    if (ret) {
        pr_warn("variable tests failed!\n");
        return ret;
    }

#ifdef TEST_BITFIELD_COMPILE
    /* these should fail compilation */
    CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
    u32_encode_bits(7, 0x06000000);

    /* this should at least give a warning */
    u16_encode_bits(0, 0x60000);
#endif

    pr_info("tests passed\n");

    return 0;
}
module_init(test_bitfields)

MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
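For readers unfamiliar with the helpers under test: for a run-time mask, u32_encode_bits()/u32_get_bits() reduce to shift-and-mask against the field's lowest set bit (a sketch below; the kernel versions also add compile-time sanity checks for constant masks):

#include <stdint.h>

static uint32_t demo_encode_bits(uint32_t v, uint32_t mask)
{
    /* place v into the field described by mask */
    return (v << __builtin_ctz(mask)) & mask;
}

static uint32_t demo_get_bits(uint32_t reg, uint32_t mask)
{
    /* extract the field described by mask from reg */
    return (reg & mask) >> __builtin_ctz(mask);
}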
lib/test_printf.c
@@ -206,6 +206,7 @@ test_string(void)
#define PTR_WIDTH 16
#define PTR ((void *)0xffff0123456789abUL)
#define PTR_STR "ffff0123456789ab"
#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000"	/* hex 32 zero bits */

static int __init
@@ -216,7 +217,16 @@ plain_format(void)

    nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);

    if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
    if (nchars != PTR_WIDTH)
        return -1;

    if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
        pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
            PTR_VAL_NO_CRNG);
        return 0;
    }

    if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
        return -1;

    return 0;
@@ -227,6 +237,7 @@ plain_format(void)
#define PTR_WIDTH 8
#define PTR ((void *)0x456789ab)
#define PTR_STR "456789ab"
#define PTR_VAL_NO_CRNG "(ptrval)"

static int __init
plain_format(void)
@@ -245,7 +256,16 @@ plain_hash(void)

    nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);

    if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
    if (nchars != PTR_WIDTH)
        return -1;

    if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
        pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
            PTR_VAL_NO_CRNG);
        return 0;
    }

    if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
        return -1;

    return 0;
lib/test_rhashtable.c
@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
    const struct test_obj_rhl *obj = data;

    return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
    return (obj->value.id % 10);
}

static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
    .key_offset = offsetof(struct test_obj, value),
    .key_len = sizeof(struct test_obj_val),
    .hashfn = jhash,
    .nulls_base = (3U << RHT_BASE_SHIFT),
};

static struct rhashtable_params test_rht_params_dup = {
@@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries)
    if (!obj_in_table)
        goto out_free;

    /* nulls_base not supported in rhlist interface */
    test_rht_params.nulls_base = 0;
    err = rhltable_init(&rhlt, &test_rht_params);
    if (WARN_ON(err))
        goto out_free;
@@ -501,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
    unsigned int i, cnt = 0;

    ht = &rhlt->ht;
    /* Take the mutex to avoid RCU warning */
    mutex_lock(&ht->mutex);
    tbl = rht_dereference(ht->tbl, ht);
    for (i = 0; i < tbl->size; i++) {
        struct rhash_head *pos, *next;
@@ -534,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
        }
    }
    printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
    mutex_unlock(&ht->mutex);

    return cnt;
}
lib/vsprintf.c
@@ -1651,6 +1651,17 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
    return widen_string(buf, buf - buf_start, end, spec);
}

/* Make pointers available for printing early in the boot sequence. */
static int debug_boot_weak_hash __ro_after_init;

static int __init debug_boot_weak_hash_enable(char *str)
{
    debug_boot_weak_hash = 1;
    pr_info("debug_boot_weak_hash enabled\n");
    return 0;
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);

static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
static siphash_key_t ptr_key __read_mostly;

@@ -1675,8 +1686,16 @@ static struct random_ready_callback random_ready = {

static int __init initialize_ptr_random(void)
{
    int ret = add_random_ready_callback(&random_ready);
    int key_size = sizeof(ptr_key);
    int ret;

    /* Use hw RNG if available. */
    if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
        static_branch_disable(&not_filled_random_ptr_key);
        return 0;
    }

    ret = add_random_ready_callback(&random_ready);
    if (!ret) {
        return 0;
    } else if (ret == -EALREADY) {
@@ -1695,6 +1714,12 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
    const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
    unsigned long hashval;

    /* When debugging early boot use non-cryptographically secure hash. */
    if (unlikely(debug_boot_weak_hash)) {
        hashval = hash_long((unsigned long)ptr, 32);
        return pointer_string(buf, end, (const void *)hashval, spec);
    }

    if (static_branch_unlikely(&not_filled_random_ptr_key)) {
        spec.field_width = 2 * sizeof(ptr);
        /* string length must be less than default_width */
@@ -1942,6 +1967,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        case 'F':
            return device_node_string(buf, end, ptr, spec, fmt + 1);
        }
        break;
    case 'x':
        return pointer_string(buf, end, ptr, spec);
    }
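With debug_boot_weak_hash on the kernel command line, %p values are hashed with hash_long() so they can be printed before the siphash key is ready; the output is stable within a boot but not cryptographically secure. A userspace approximation of that fallback on 64-bit (a sketch; the multiplier is GOLDEN_RATIO_64 from include/linux/hash.h):

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_hash_ptr(const void *ptr)
{
    /* hash_64(val, 32): multiply and keep the top 32 bits */
    return (uint32_t)(((uint64_t)(uintptr_t)ptr * 0x61C8864680B583EBull) >> 32);
}

int main(void)
{
    int x;

    printf("%08x\n", demo_hash_ptr(&x));
    return 0;
}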
lib/xz/xz_crc32.c
@@ -15,6 +15,7 @@
 * but they are bigger and use more memory for the lookup table.
 */

#include <linux/crc32poly.h>
#include "xz_private.h"

/*
@@ -29,7 +30,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];

XZ_EXTERN void xz_crc32_init(void)
{
    const uint32_t poly = 0xEDB88320;
    const uint32_t poly = CRC32_POLY_LE;

    uint32_t i;
    uint32_t j;