Merge tag 'libnvdimm-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:
 "The bulk of this has been in -next since before the merge window
  opened, with no known collisions / issues reported. The only detail
  worth noting, outside the summary below, is that the
  "libnvdimm-start-pad" topic has been truncated to just cleanups and
  small fixes. The full topic branch would have doubled down on hacks
  around the "section alignment" limitation of the core-mm, instead
  effort is now being spent to address that root issue in the memory
  hotplug implementation for v5.2.

   - Fix nfit-bus command submission regression

   - Support retrieval of short-ARS results if the ARS state is
     "requires continuation", and even if the "no_init_ars" module
     parameter is specified

   - Allow busy-polling of the kernel ARS state by allowing root to
     reset the exponential back-off timer

   - Filter potentially stale ARS results by tracking query-ARS
     relative to the previous start-ARS

   - Enhance dax_device alignment checks

   - Add support for the Hyper-V family of device-specific-methods
     (DSMs)

   - Add several fixes and workarounds for Hyper-V compatibility

   - Fix support to cache the dirty-shutdown-count at init"

* tag 'libnvdimm-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (25 commits)
  libnvdimm/namespace: Clean up holder_class_store()
  libnvdimm/of_pmem: Fix platform_no_drv_owner.cocci warnings
  acpi/nfit: Update NFIT flags error message
  libnvdimm/btt: Fix LBA masking during 'free list' population
  libnvdimm/btt: Remove unnecessary code in btt_freelist_init
  libnvdimm/pfn: Remove dax_label_reserve
  dax: Check the end of the block-device capacity with dax_direct_access()
  nfit/ars: Avoid stale ARS results
  nfit/ars: Allow root to busy-poll the ARS state machine
  nfit/ars: Introduce scrub_flags
  nfit/ars: Remove ars_start_flags
  nfit/ars: Attempt short-ARS even in the no_init_ars case
  nfit/ars: Attempt a short-ARS whenever the ARS state is idle at boot
  acpi/nfit: Require opt-in for read-only label configurations
  libnvdimm/pmem: Honor force_raw for legacy pmem regions
  libnvdimm/pfn: Account for PAGE_SIZE > info-block-size in nd_pfn_init()
  libnvdimm: Fix altmap reservation size calculation
  libnvdimm, pfn: Fix over-trim in trim_pfn_device()
  acpi/nfit: Fix bus command validation
  libnvdimm/dimm: Add a no-BLK quirk based on NVDIMM family
  ...
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
 
 static int btt_freelist_init(struct arena_info *arena)
 {
-	int old, new, ret;
-	u32 i, map_entry;
-	struct log_entry log_new, log_old;
+	int new, ret;
+	struct log_entry log_new;
+	u32 i, map_entry, log_oldmap, log_newmap;
 
 	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
 					GFP_KERNEL);
@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena)
 		return -ENOMEM;
 
 	for (i = 0; i < arena->nfree; i++) {
-		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
-		if (old < 0)
-			return old;
-
 		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
 		if (new < 0)
 			return new;
 
+		/* old and new map entries with any flags stripped out */
+		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
+		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
+
 		/* sub points to the next one to be overwritten */
 		arena->freelist[i].sub = 1 - new;
 		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
-		arena->freelist[i].block = le32_to_cpu(log_new.old_map);
+		arena->freelist[i].block = log_oldmap;
 
 		/*
 		 * FIXME: if error clearing fails during init, we want to make
 		 * the BTT read-only
 		 */
-		if (ent_e_flag(log_new.old_map)) {
+		if (ent_e_flag(log_new.old_map) &&
+				!ent_normal(log_new.old_map)) {
+			arena->freelist[i].has_err = 1;
 			ret = arena_clear_freelist_error(arena, i);
 			if (ret)
 				dev_err_ratelimited(to_dev(arena),
@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena)
 		}
 
 		/* This implies a newly created or untouched flog entry */
-		if (log_new.old_map == log_new.new_map)
+		if (log_oldmap == log_newmap)
 			continue;
 
 		/* Check if map recovery is needed */
@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena)
 				NULL, NULL, 0);
 		if (ret)
 			return ret;
-		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
-				(le32_to_cpu(log_new.old_map) == map_entry)) {
+
+		/*
+		 * The map_entry from btt_read_map is stripped of any flag bits,
+		 * so use the stripped out versions from the log as well for
+		 * testing whether recovery is needed. For restoration, use the
+		 * 'raw' version of the log entries as that captured what we
+		 * were going to write originally.
+		 */
+		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
 			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
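In prose, the reworked loop reads back only the "new" flog slot, strips the flag bits from both of its map fields, and then decides whether the map needs fixing. A minimal standalone sketch of that decision, operating on values assumed to be already flag-stripped (the function name is illustrative, not a kernel symbol):

#include <stdbool.h>
#include <stdint.h>

/* illustrative stand-in for the checks in btt_freelist_init() */
static bool map_needs_recovery(uint32_t log_oldmap, uint32_t log_newmap,
                               uint32_t map_entry)
{
        /* newly created or untouched flog entry: nothing to recover */
        if (log_oldmap == log_newmap)
                return false;

        /* the flog committed, but the map still holds the old LBA */
        return log_newmap != map_entry && log_oldmap == map_entry;
}

The point of the fix is that all three values compared here are flag-stripped; previously the raw log fields (which may carry the error/trim bits) were compared against the stripped map_entry, so recovery could be spuriously triggered or missed.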
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -44,6 +44,8 @@
 #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
 #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
 #define set_e_flag(ent) (ent |= MAP_ERR_MASK)
+/* 'normal' is both e and z flags set */
+#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
 
 enum btt_init_state {
 	INIT_UNCHECKED = 0,
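The new ent_normal() test distinguishes the post-initialization encoding (both 'e' and 'z' bits set) from a genuine error record ('e' alone). A standalone sketch; the mask values are assumed to mirror the MAP_ERR_SHIFT/MAP_TRIM_SHIFT definitions in btt.h:

#include <stdio.h>
#include <stdint.h>

#define MAP_ERR_MASK    (1u << 30)      /* assumed: matches btt.h */
#define MAP_TRIM_MASK   (1u << 31)      /* assumed: matches btt.h */
#define MAP_LBA_MASK    (~(MAP_ERR_MASK | MAP_TRIM_MASK))

#define ent_lba(ent)    ((ent) & MAP_LBA_MASK)
#define ent_e_flag(ent) (!!((ent) & MAP_ERR_MASK))
#define ent_z_flag(ent) (!!((ent) & MAP_TRIM_MASK))
/* 'normal' is both e and z flags set */
#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))

int main(void)
{
        uint32_t normal = 0x1234 | MAP_ERR_MASK | MAP_TRIM_MASK;
        uint32_t error  = 0x1234 | MAP_ERR_MASK;

        /* both print lba=0x1234; only the second is a real error entry */
        printf("normal: lba=%#x e=%d normal=%d\n",
               ent_lba(normal), ent_e_flag(normal), ent_normal(normal));
        printf("error:  lba=%#x e=%d normal=%d\n",
               ent_lba(error), ent_e_flag(error), ent_normal(error));
        return 0;
}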
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(size);
 
+static ssize_t log_zero_flags_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Y\n");
+}
+static DEVICE_ATTR_RO(log_zero_flags);
+
 static struct attribute *nd_btt_attributes[] = {
 	&dev_attr_sector_size.attr,
 	&dev_attr_namespace.attr,
 	&dev_attr_uuid.attr,
 	&dev_attr_size.attr,
+	&dev_attr_log_zero_flags.attr,
 	NULL,
 };
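The new attribute is a constant capability flag rather than device state: reading it always yields "Y", which lets userspace tooling detect that this kernel's BTT tolerates flog entries with zeroed flag bits (the behavior fixed in the btt.c hunk above). A sketch of how a tool might probe it; the device name btt1.0 is hypothetical:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* hypothetical BTT device name; pick a real one from /sys/bus/nd/devices */
        FILE *f = fopen("/sys/bus/nd/devices/btt1.0/log_zero_flags", "r");
        char buf[8] = "";

        if (!f) {
                puts("attribute absent (older kernel or no such device)");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                buf[strcspn(buf, "\n")] = '\0';
        fclose(f);

        /* "Y" == this BTT accepts flog entries with zeroed flag bits */
        printf("log_zero_flags: %s\n", buf);
        return 0;
}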
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -11,6 +11,7 @@
  * General Public License for more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/ndctl.h>
@@ -25,6 +26,10 @@
 
 static DEFINE_IDA(dimm_ida);
 
+static bool noblk;
+module_param(noblk, bool, 0444);
+MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
+
 /*
  * Retrieve bus and dimm handle and return if this bus supports
  * get_config_data commands
@@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 
 	nvdimm->dimm_id = dimm_id;
 	nvdimm->provider_data = provider_data;
+	if (noblk)
+		flags |= 1 << NDD_NOBLK;
 	nvdimm->flags = flags;
 	nvdimm->cmd_mask = cmd_mask;
 	nvdimm->num_flush = num_flush;
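The quirk is wired up with the stock module-parameter machinery: a file-scope bool registered via module_param() with 0444 permissions, consulted once per DIMM in __nvdimm_create(). For reference, a minimal self-contained module using the same pattern (the demo module and init/exit names are made up for illustration):

// SPDX-License-Identifier: GPL-2.0
/* hypothetical demo module; mirrors the noblk parameter pattern above */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool noblk;
module_param(noblk, bool, 0444);        /* read-only via sysfs */
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

static int __init noblk_demo_init(void)
{
        pr_info("noblk=%d\n", noblk);
        return 0;
}
module_init(noblk_demo_init);

static void __exit noblk_demo_exit(void)
{
}
module_exit(noblk_demo_exit);

MODULE_LICENSE("GPL");

With the real driver, the same flag would be set at boot with libnvdimm.noblk=1 and read back from /sys/module/libnvdimm/parameters/noblk.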
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 		return 0; /* no label, nothing to reserve */
 
 	for_each_clear_bit_le(slot, free, nslot) {
+		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
 		struct nd_namespace_label *nd_label;
 		struct nd_region *nd_region = NULL;
 		u8 label_uuid[NSLABEL_UUID_LEN];
@@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 
 		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
 		flags = __le32_to_cpu(nd_label->flags);
+		if (test_bit(NDD_NOBLK, &nvdimm->flags))
+			flags &= ~NSLABEL_FLAG_LOCAL;
 		nd_label_gen_id(&label_id, label_uuid, flags);
 		res = nvdimm_allocate_dpa(ndd, &label_id,
 				__le64_to_cpu(nd_label->dpa),
@@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
 
 static int __pmem_label_update(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
-		int pos)
+		int pos, unsigned long flags)
 {
 	struct nd_namespace_common *ndns = &nspm->nsio.common;
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
 	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
 	if (nspm->alt_name)
 		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
-	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
+	nd_label->flags = __cpu_to_le32(flags);
 	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
 	nd_label->position = __cpu_to_le16(pos);
 	nd_label->isetcookie = __cpu_to_le64(cookie);
@@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
 		struct nd_namespace_pmem *nspm, resource_size_t size)
 {
-	int i;
+	int i, rc;
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 		struct resource *res;
-		int rc, count = 0;
+		int count = 0;
 
 		if (size == 0) {
 			rc = del_labels(nd_mapping, nspm->uuid);
@@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
 		if (rc < 0)
 			return rc;
 
-		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
+		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
+				NSLABEL_FLAG_UPDATING);
 		if (rc)
 			return rc;
 	}
+
+	if (size == 0)
+		return 0;
+
+	/* Clear the UPDATING flag per UEFI 2.7 expectations */
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+
+		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
+		if (rc)
+			return rc;
+	}
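The net effect of the label.c changes is a two-phase commit across all mappings: first write every label with NSLABEL_FLAG_UPDATING set, and only after every write has succeeded, rewrite each label with the flag cleared. A reader that interrupts the sequence therefore sees either the old labels or labels still marked in-flight, which is what UEFI 2.7 expects. A standalone sketch of the ordering (write_label() and the flag value are illustrative stand-ins, not kernel symbols):

#include <stdio.h>

enum { MAPPINGS = 4 };
enum { FLAG_UPDATING = 0x8 };   /* assumed stand-in for NSLABEL_FLAG_UPDATING */

/* illustrative stand-in for __pmem_label_update() */
static int write_label(int mapping, unsigned long flags)
{
        printf("mapping %d: flags=%#lx\n", mapping, flags);
        return 0;
}

int main(void)
{
        int i, rc;

        /* phase 1: publish every label with UPDATING set */
        for (i = 0; i < MAPPINGS; i++)
                if ((rc = write_label(i, FLAG_UPDATING)))
                        return rc;

        /* phase 2: all writes landed; clear UPDATING everywhere */
        for (i = 0; i < MAPPINGS; i++)
                if ((rc = write_label(i, 0)))
                        return rc;
        return 0;
}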
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_common *ndns = to_ndns(dev);
 	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	if (ndns->force_raw)
+		return false;
+
 	nsio = to_nd_namespace_io(dev);
 	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
 				IORESOURCE_SYSTEM_RAM,
@@ -1506,13 +1510,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
 	if (dev->driver || ndns->claim)
 		return -EBUSY;
 
-	if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0)
+	if (sysfs_streq(buf, "btt"))
 		ndns->claim_class = btt_claim_class(dev);
-	else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0)
+	else if (sysfs_streq(buf, "pfn"))
 		ndns->claim_class = NVDIMM_CCLASS_PFN;
-	else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0)
+	else if (sysfs_streq(buf, "dax"))
 		ndns->claim_class = NVDIMM_CCLASS_DAX;
-	else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0)
+	else if (sysfs_streq(buf, ""))
 		ndns->claim_class = NVDIMM_CCLASS_NONE;
 	else
 		return -EINVAL;
@@ -2492,6 +2496,12 @@ static int init_active_labels(struct nd_region *nd_region)
 			if (!label_ent)
 				break;
 			label = nd_label_active(ndd, j);
+			if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
+				u32 flags = __le32_to_cpu(label->flags);
+
+				flags &= ~NSLABEL_FLAG_LOCAL;
+				label->flags = __cpu_to_le32(flags);
+			}
 			label_ent->label = label;
 
 			mutex_lock(&nd_mapping->lock);
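The __holder_class_store() cleanup swaps each pair of strcmp() calls for sysfs_streq(), the kernel helper (lib/string.c) that treats a single trailing newline as equal, since sysfs writes usually arrive newline-terminated. A userspace re-implementation that illustrates the semantics:

#include <assert.h>
#include <stdbool.h>

/* userspace re-implementation of the kernel's sysfs_streq() semantics */
static bool my_sysfs_streq(const char *s1, const char *s2)
{
        while (*s1 && *s1 == *s2) {
                s1++;
                s2++;
        }
        if (*s1 == *s2)
                return true;
        if (!*s1 && *s2 == '\n' && !s2[1])
                return true;
        if (*s1 == '\n' && !s1[1] && !*s2)
                return true;
        return false;
}

int main(void)
{
        assert(my_sysfs_streq("btt\n", "btt"));  /* trailing newline is OK */
        assert(my_sysfs_streq("btt", "btt"));
        assert(!my_sysfs_streq("btt x", "btt")); /* embedded text is not */
        return 0;
}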
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -108,7 +108,6 @@ static struct platform_driver of_pmem_region_driver = {
 	.remove = of_pmem_region_remove,
 	.driver = {
 		.name = "of_pmem",
-		.owner = THIS_MODULE,
 		.of_match_table = of_pmem_region_match,
 	},
 };
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
+static u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
 /*
  * We hotplug memory at section granularity, pad the reserved area from
  * the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	u32 reserve = info_block_reserve();
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	res->end -= end_trunc;
 
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (offset < reserve)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 				le64_to_cpu(nd_pfn->pfn_sb->npfns),
 				nd_pfn->npfns);
 		memcpy(altmap, &__altmap, sizeof(*altmap));
-		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->free = PHYS_PFN(offset - reserve);
 		altmap->alloc = 0;
 		pgmap->altmap_valid = true;
 	} else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
 	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
 				IORES_DESC_NONE) == REGION_MIXED
 			|| !IS_ALIGNED(end, nd_pfn->align)
-			|| nd_region_conflict(nd_region, start, size + adjust))
+			|| nd_region_conflict(nd_region, start, size))
 		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
 }
 
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
-	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	u32 start_pad, end_trunc, reserve = info_block_reserve();
 	resource_size_t start, size;
 	struct nd_region *nd_region;
-	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 */
 	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
-	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
 			/ PAGE_SIZE);
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
@@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 		 * when populating the vmemmap. This *should* be equal to
 		 * PMD_SIZE for most architectures.
 		 */
-		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+		offset = ALIGN(start + reserve + 64 * npfns,
 				max(nd_pfn->align, PMD_SIZE)) - start;
 	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K + dax_label_reserve,
-				nd_pfn->align) - start;
+		offset = ALIGN(start + reserve, nd_pfn->align) - start;
 	else
 		return -ENXIO;
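The recurring SZ_8K-to-info_block_reserve() substitution is an arithmetic fix: the reserved info-block area is 8K of media, but everything accounted against it (the altmap free pages, the PFN_MODE_RAM sanity check, the npfns calculation) must be page-granular, so the reservation has to round up to PAGE_SIZE on configurations whose pages are larger than 8K (e.g. 64K-page arm64/powerpc builds, where the old hard-coded SZ_8K under-reserved). A small userspace check of the rounding; ALIGN_UP reimplements the kernel's ALIGN() for power-of-two alignments:

#include <stdio.h>

#define SZ_8K (8 * 1024UL)
/* same rounding the kernel's ALIGN() macro performs for powers of two */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long page_sizes[] = { 4096, 16384, 65536 };

        for (int i = 0; i < 3; i++) {
                unsigned long ps = page_sizes[i];
                /* mirrors info_block_reserve(): ALIGN(SZ_8K, PAGE_SIZE) */
                printf("PAGE_SIZE=%6lu -> reserve=%lu\n", ps, ALIGN_UP(SZ_8K, ps));
        }
        return 0;
}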
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
 
 		if (test_bit(NDD_UNARMED, &nvdimm->flags))
 			ro = 1;
+
+		if (test_bit(NDD_NOBLK, &nvdimm->flags)
+				&& dev_type == &nd_blk_device_type) {
+			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
+					caller, dev_name(&nvdimm->dev), i);
+			return NULL;
+		}
 	}
 
 	if (dev_type == &nd_blk_device_type) {