Merge 4.18-rc7 into driver-core-next
We need the driver core changes in here as well for testing.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -5,7 +5,8 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB || (SLAB && !DEBUG_SLAB)
+	depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+	select SLUB_DEBUG if SLUB
 	select CONSTRUCTORS
 	select STACKDEPOT
 	help
@@ -596,15 +596,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
 	return ret;
 }
 
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+				struct iov_iter *i)
+{
+	struct pipe_inode_info *pipe = i->pipe;
+	size_t n, off, xfer = 0;
+	int idx;
+
+	if (!sanity(i))
+		return 0;
+
+	bytes = n = push_pipe(i, bytes, &idx, &off);
+	if (unlikely(!n))
+		return 0;
+	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+		unsigned long rem;
+
+		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+				chunk);
+		i->idx = idx;
+		i->iov_offset = off + chunk - rem;
+		xfer += chunk - rem;
+		if (rem)
+			break;
+		n -= chunk;
+		addr += chunk;
+	}
+	i->count -= xfer;
+	return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @iter: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and typical _copy_to_iter().
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ *   byte-by-byte until the fault happens again. Re-triggering machine
+ *   checks is potentially fatal so the implementation uses source
+ *   alignment and poison alignment assumptions to avoid re-triggering
+ *   hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ *   a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
 size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
 	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
-	if (unlikely(i->type & ITER_PIPE)) {
-		WARN_ON(1);
-		return 0;
-	}
+	if (unlikely(i->type & ITER_PIPE))
+		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
 	iterate_and_advance(i, bytes, v,
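The kernel-doc above stresses that _copy_to_iter_mcsafe() may legitimately return fewer bytes than requested for ITER_KVEC, ITER_PIPE, and ITER_BVEC destinations. A minimal caller sketch, not part of this merge (pmem_read_into_iter() is a hypothetical helper name), showing how such a short copy might be turned into a partial-read result:

#include <linux/uio.h>

/*
 * Hypothetical helper, for illustration only: treat a short mcsafe copy
 * as a partial read, and report -EIO only when nothing could be copied.
 */
static ssize_t pmem_read_into_iter(const void *kaddr, size_t len,
                                   struct iov_iter *i)
{
        size_t copied = _copy_to_iter_mcsafe(kaddr, len, i);

        if (!copied && len)
                return -EIO;    /* copy stopped right at the start */
        return copied;          /* may be < len on a poisoned range */
}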
@@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL(_copy_from_iter_nocache);
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @iter: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that guarantees all data is flushed for
+ * all iterator types. The _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
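As a companion to the comment just added, a hedged write-path sketch, assuming CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE is enabled as the #ifdef above requires; pmem_write_from_iter() is a hypothetical helper, not part of this merge, and simply forwards to the documented API:

#include <linux/uio.h>

/* Hypothetical helper: copy from the iterator into persistent memory. */
static size_t pmem_write_from_iter(void *pmem_addr, size_t bytes,
                                   struct iov_iter *i)
{
        /* The copy itself flushes the destination; no separate writeback. */
        return _copy_from_iter_flushcache(pmem_addr, bytes, i);
}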
@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 	spin_lock_irqsave(&tags->lock, flags);
 
 	/* Fastpath */
-	if (likely(tags->nr_free >= 0)) {
+	if (likely(tags->nr_free)) {
 		tag = tags->freelist[--tags->nr_free];
 		spin_unlock_irqrestore(&tags->lock, flags);
 		return tag;
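The one-line change above matters because tags->nr_free is an unsigned count, so '>= 0' is always true and the fast path could pop from an empty freelist and wrap the counter. A standalone sketch of that pitfall (ordinary userspace C, not kernel code; the variable is only illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int nr_free = 0;               /* empty freelist */

        /* Old check: the comparison is always true for an unsigned type. */
        if (nr_free >= 0)
                printf("fastpath taken even though the list is empty\n");

        /* ... and the pop that follows would wrap the counter. */
        printf("--nr_free yields %u\n", --nr_free);

        /* New check: a plain truthiness test skips the empty case. */
        nr_free = 0;
        if (nr_free)
                printf("never printed for an empty list\n");

        return 0;
}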
@@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 			skip++;
 			if (list == iter->list) {
 				iter->p = p;
-				skip = skip;
+				iter->skip = skip;
 				goto found;
 			}
 		}
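For context, the walker that this self-assignment fix repairs is driven roughly as below; dump_table() and struct my_obj are hypothetical names used only for illustration. Without storing the running skip count into iter->skip, a walk that is stopped and later restarted could resume at the wrong position within a bucket chain.

#include <linux/rhashtable.h>

struct my_obj;  /* hypothetical object type stored in the table */

static void dump_table(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj))
                        continue;       /* -EAGAIN: a resize intervened */
                pr_info("walked object at %p\n", obj);
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}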
@@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
 
 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
 {
-	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-		   (unsigned long)params->min_size);
+	size_t retsize;
+
+	if (params->nelem_hint)
+		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+			      (unsigned long)params->min_size);
+	else
+		retsize = max(HASH_DEFAULT_SIZE,
+			      (unsigned long)params->min_size);
+
+	return retsize;
 }
 
 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht,
 	struct bucket_table *tbl;
 	size_t size;
 
-	size = HASH_DEFAULT_SIZE;
-
 	if ((!params->key_len && !params->obj_hashfn) ||
 	    (params->obj_hashfn && !params->obj_cmpfn))
 		return -EINVAL;
@@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht,
 
 	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
 
-	if (params->nelem_hint)
-		size = rounded_hashtable_size(&ht->p);
+	size = rounded_hashtable_size(&ht->p);
 
 	if (params->locks_mul)
 		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
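Taken together, the three rounded_hashtable_size()/rhashtable_init() hunks mean the initial size is now always computed by the helper, which falls back to HASH_DEFAULT_SIZE when no element hint is given. A hedged setup sketch (struct my_obj, my_params, and my_table_setup() are hypothetical names, not from this merge):

#include <linux/rhashtable.h>

struct my_obj {
        u32 key;
        struct rhash_head node;
};

static const struct rhashtable_params my_params = {
        .key_len        = sizeof(u32),
        .key_offset     = offsetof(struct my_obj, key),
        .head_offset    = offsetof(struct my_obj, node),
        /* .nelem_hint left at 0: the table now starts at the default
         * size, still no smaller than .min_size if one were set. */
};

static int my_table_setup(struct rhashtable *ht)
{
        return rhashtable_init(ht, &my_params);
}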
@@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 			      void (*free_fn)(void *ptr, void *arg),
 			      void *arg)
 {
-	struct bucket_table *tbl;
+	struct bucket_table *tbl, *next_tbl;
 	unsigned int i;
 
 	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
 	tbl = rht_dereference(ht->tbl, ht);
+restart:
 	if (free_fn) {
 		for (i = 0; i < tbl->size; i++) {
 			struct rhash_head *pos, *next;
@@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 		}
 	}
 
+	next_tbl = rht_dereference(tbl->future_tbl, ht);
 	bucket_table_free(tbl);
+	if (next_tbl) {
+		tbl = next_tbl;
+		goto restart;
+	}
 	mutex_unlock(&ht->mutex);
 }
 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
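The restart loop added above means a single teardown call now also frees any future_tbl left behind by an in-flight resize. A minimal usage sketch, with the hypothetical free_my_obj()/my_table_teardown() names assumed for illustration:

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical per-entry destructor passed as free_fn. */
static void free_my_obj(void *ptr, void *arg)
{
        kfree(ptr);
}

static void my_table_teardown(struct rhashtable *ht)
{
        /* Frees every entry, the current table, and any pending future_tbl. */
        rhashtable_free_and_destroy(ht, free_my_obj, NULL);
}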
@@ -24,9 +24,6 @@
  **/
 struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	if (sg_is_last(sg))
 		return NULL;
 
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 	for_each_sg(sgl, sg, nents, i)
 		ret = sg;
 
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 	BUG_ON(!sg_is_last(ret));
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(sg_last);
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
 	{	/* Mainly checking JIT here. */
 		"BPF_MAXINSNS: Ctx heavy transformations",
 		{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+		CLASSIC | FLAG_EXPECTED_FAIL,
+#else
 		CLASSIC,
+#endif
 		{ },
 		{
 			{  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
 			{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
 		},
 		.fill_helper = bpf_fill_maxinsns6,
+		.expected_errcode = -ENOTSUPP,
 	},
 	{	/* Mainly checking JIT here. */
 		"BPF_MAXINSNS: Call heavy transformations",
 		{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
 		CLASSIC | FLAG_NO_DATA,
+#endif
 		{ },
 		{ { 1, 0 }, { 10, 0 } },
 		.fill_helper = bpf_fill_maxinsns7,
+		.expected_errcode = -ENOTSUPP,
 	},
 	{	/* Mainly checking JIT here. */
 		"BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: exec all MSH",
 		{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+		CLASSIC | FLAG_EXPECTED_FAIL,
+#else
 		CLASSIC,
+#endif
 		{ 0xfa, 0xfb, 0xfc, 0xfd, },
 		{ { 4, 0xababab83 } },
 		.fill_helper = bpf_fill_maxinsns13,
+		.expected_errcode = -ENOTSUPP,
 	},
 	{
 		"BPF_MAXINSNS: ld_abs+get_processor_id",
 		{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+		CLASSIC | FLAG_EXPECTED_FAIL,
+#else
 		CLASSIC,
+#endif
 		{ },
 		{ { 1, 0xbee } },
 		.fill_helper = bpf_fill_ld_abs_get_processor_id,
+		.expected_errcode = -ENOTSUPP,
 	},
 	/*
 	 * LD_IND / LD_ABS on fragmented SKBs
@@ -260,13 +260,6 @@ plain(void)
 {
 	int err;
 
-	/*
-	 * Make sure crng is ready. Otherwise we get "(ptrval)" instead
-	 * of a hashed address when printing '%p' in plain_hash() and
-	 * plain_format().
-	 */
-	wait_for_random_bytes();
-
 	err = plain_hash();
 	if (err) {
 		pr_warn("plain 'p' does not appear to be hashed\n");