libnvdimm: fix clear poison locking with spinlock and GFP_NOWAIT allocation
The following warning results from holding a lane spinlock, preempt_disable(), or the btt map spinlock and then trying to take the reconfig_mutex to walk the poison list and potentially add new entries:

BUG: sleeping function called from invalid context at kernel/locking/mutex.c:747
in_atomic(): 1, irqs_disabled(): 0, pid: 17159, name: dd
[..]
Call Trace:
 dump_stack+0x85/0xc8
 ___might_sleep+0x184/0x250
 __might_sleep+0x4a/0x90
 __mutex_lock+0x58/0x9b0
 ? nvdimm_bus_lock+0x21/0x30 [libnvdimm]
 ? __nvdimm_bus_badblocks_clear+0x2f/0x60 [libnvdimm]
 ? acpi_nfit_forget_poison+0x79/0x80 [nfit]
 ? _raw_spin_unlock+0x27/0x40
 mutex_lock_nested+0x1b/0x20
 nvdimm_bus_lock+0x21/0x30 [libnvdimm]
 nvdimm_forget_poison+0x25/0x50 [libnvdimm]
 nvdimm_clear_poison+0x106/0x140 [libnvdimm]
 nsio_rw_bytes+0x164/0x270 [libnvdimm]
 btt_write_pg+0x1de/0x3e0 [nd_btt]
 ? blk_queue_enter+0x30/0x290
 btt_make_request+0x11a/0x310 [nd_btt]
 ? blk_queue_enter+0xb7/0x290
 ? blk_queue_enter+0x30/0x290
 generic_make_request+0x118/0x3b0

A spinlock is introduced to protect the poison list, so the reconfig_mutex no longer has to be acquired to touch the list. The add_poison() function has been broken out into two helpers: one to allocate the poison entry and one to append the entry to the list. This allows the non-I/O path to drop the poison_lock around the allocation and continue to allocate the poison entry with GFP_KERNEL, while the I/O path uses GFP_NOWAIT in order to satisfy atomic context.

Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
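The key pattern lands in bus_add_poison() in the diff below: with the poison list now guarded by a spinlock, the possibly-sleeping GFP_KERNEL allocation is performed with the lock dropped and the list state is re-checked once the lock is retaken, while atomic callers allocate with GFP_NOWAIT instead. For illustration only, here is a minimal userspace C sketch of that drop-lock/allocate/retake idea built on pthread spinlocks; every name in it (entry, list_head, append_entry, add_entry) is hypothetical, and it is an analogy to, not a copy of, the kernel code:

/*
 * Userspace sketch (hypothetical names) of the pattern bus_add_poison()
 * uses: drop a spinlock around an allocation that may sleep, retake the
 * lock, and revalidate before touching the list.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long long start;
	unsigned long long length;
	struct entry *next;
};

static pthread_spinlock_t list_lock;
static struct entry *list_head;

/* Caller must hold list_lock -- the analogue of append_poison_entry(). */
static void append_entry(struct entry *e, unsigned long long addr,
			 unsigned long long len)
{
	e->start = addr;
	e->length = len;
	e->next = list_head;
	list_head = e;
}

/* Called with list_lock held, like bus_add_poison(). */
static int add_entry(unsigned long long addr, unsigned long long len)
{
	struct entry *e_new;

	/*
	 * malloc() may block, so release the spinlock around it -- the
	 * kernel analogue is kzalloc(..., GFP_KERNEL) outside poison_lock.
	 * An atomic context would instead allocate with GFP_NOWAIT and
	 * never drop the lock.
	 */
	pthread_spin_unlock(&list_lock);
	e_new = malloc(sizeof(*e_new));
	pthread_spin_lock(&list_lock);

	if (!e_new)
		return -1;
	/*
	 * The list may have changed while unlocked; a real caller would
	 * re-check for duplicates here before appending.
	 */
	append_entry(e_new, addr, len);
	return 0;
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&list_lock);
	if (add_entry(0x1000, 512) == 0)
		printf("added: start=0x%llx len=%llu\n",
		       list_head->start, list_head->length);
	pthread_spin_unlock(&list_lock);

	pthread_spin_destroy(&list_lock);
	return 0;
}

Build with cc sketch.c -lpthread. The same caveat as in the kernel patch applies: anything observed about the list before the unlock must be revalidated after relocking, which is why the diff re-tests list_empty() after the kzalloc().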

Committed by: Dan Williams
Parent: efebc71118
Commit: b3b454f694
@@ -518,6 +518,15 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
 }
 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
 
+static void append_poison_entry(struct nvdimm_bus *nvdimm_bus,
+		struct nd_poison *pl, u64 addr, u64 length)
+{
+	lockdep_assert_held(&nvdimm_bus->poison_lock);
+	pl->start = addr;
+	pl->length = length;
+	list_add_tail(&pl->list, &nvdimm_bus->poison_list);
+}
+
 static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
 		gfp_t flags)
 {
@@ -527,19 +536,24 @@ static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
 	if (!pl)
 		return -ENOMEM;
 
-	pl->start = addr;
-	pl->length = length;
-	list_add_tail(&pl->list, &nvdimm_bus->poison_list);
-
+	append_poison_entry(nvdimm_bus, pl, addr, length);
 	return 0;
 }
 
 static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 {
-	struct nd_poison *pl;
+	struct nd_poison *pl, *pl_new;
+
+	spin_unlock(&nvdimm_bus->poison_lock);
+	pl_new = kzalloc(sizeof(*pl_new), GFP_KERNEL);
+	spin_lock(&nvdimm_bus->poison_lock);
 
-	if (list_empty(&nvdimm_bus->poison_list))
-		return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
+	if (list_empty(&nvdimm_bus->poison_list)) {
+		if (!pl_new)
+			return -ENOMEM;
+		append_poison_entry(nvdimm_bus, pl_new, addr, length);
+		return 0;
+	}
 
 	/*
 	 * There is a chance this is a duplicate, check for those first.
@@ -551,6 +565,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 			/* If length has changed, update this list entry */
 			if (pl->length != length)
 				pl->length = length;
+			kfree(pl_new);
 			return 0;
 		}
 
@@ -559,30 +574,33 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 	 * as any overlapping ranges will get resolved when the list is consumed
 	 * and converted to badblocks
 	 */
-	return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
+	if (!pl_new)
+		return -ENOMEM;
+	append_poison_entry(nvdimm_bus, pl_new, addr, length);
+
+	return 0;
 }
 
 int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 {
 	int rc;
 
-	nvdimm_bus_lock(&nvdimm_bus->dev);
+	spin_lock(&nvdimm_bus->poison_lock);
 	rc = bus_add_poison(nvdimm_bus, addr, length);
-	nvdimm_bus_unlock(&nvdimm_bus->dev);
+	spin_unlock(&nvdimm_bus->poison_lock);
 
 	return rc;
 }
 EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
 
-void __nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
+void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
 		unsigned int len)
 {
 	struct list_head *poison_list = &nvdimm_bus->poison_list;
 	u64 clr_end = start + len - 1;
 	struct nd_poison *pl, *next;
 
-	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
-
+	spin_lock(&nvdimm_bus->poison_lock);
 	WARN_ON_ONCE(list_empty(poison_list));
 
 	/*
@@ -629,21 +647,13 @@ void __nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
 			u64 new_len = pl_end - new_start + 1;
 
 			/* Add new entry covering the right half */
-			add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+			add_poison(nvdimm_bus, new_start, new_len, GFP_NOWAIT);
 			/* Adjust this entry to cover the left half */
 			pl->length = start - pl->start;
 			continue;
 		}
 	}
+	spin_unlock(&nvdimm_bus->poison_lock);
 }
-EXPORT_SYMBOL_GPL(__nvdimm_forget_poison);
-
-void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
-		phys_addr_t start, unsigned int len)
-{
-	nvdimm_bus_lock(&nvdimm_bus->dev);
-	__nvdimm_forget_poison(nvdimm_bus, start, len);
-	nvdimm_bus_unlock(&nvdimm_bus->dev);
-}
 EXPORT_SYMBOL_GPL(nvdimm_forget_poison);