Merge tag 'libnvdimm-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm and dax updates from Dan Williams:
 "There were multiple touches outside of drivers/nvdimm/ this round to
  add cross arch compatibility to the devm_memremap_pages() interface,
  enhance numa information for persistent memory ranges, and add a
  zero_page_range() dax operation.

  This cycle I switched from the patchwork api to Konstantin's b4
  script for collecting tags (from x86, PowerPC, filesystem, and
  device-mapper folks), and everything looks to have gone ok there.
  This has all appeared in -next with no reported issues.

  Summary:

   - Add support for region alignment configuration and enforcement to
     fix compatibility across architectures and PowerPC page size
     configurations.

   - Introduce 'zero_page_range' as a dax operation. This facilitates
     filesystem-dax operation without a block-device.

   - Introduce phys_to_target_node() to facilitate drivers that want to
     know the resulting numa node if a given reserved address range was
     onlined.

   - Advertise a persistence-domain for of_pmem and papr_scm. The
     persistence domain indicates where cpu-store cycles need to reach
     in the platform-memory subsystem before the platform will consider
     them power-fail protected.

   - Promote numa_map_to_online_node() to a cross-kernel generic
     facility.

   - Save x86 numa information to allow for node-id lookups for
     reserved memory ranges, deploy that capability for the e820-pmem
     driver.

   - Pick up some miscellaneous minor fixes that missed v5.6-final,
     including some smatch reports in the ioctl path and some unit test
     compilation fixups.

   - Fixup some flexible-array declarations"

* tag 'libnvdimm-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (29 commits)
  dax: Move mandatory ->zero_page_range() check in alloc_dax()
  dax,iomap: Add helper dax_iomap_zero() to zero a range
  dax: Use new dax zero page method for zeroing a page
  dm,dax: Add dax zero_page_range operation
  s390,dcssblk,dax: Add dax zero_page_range operation to dcssblk driver
  dax, pmem: Add a dax operation zero_page_range
  pmem: Add functions for reading/writing page to/from pmem
  libnvdimm: Update persistence domain value for of_pmem and papr_scm device
  tools/test/nvdimm: Fix out of tree build
  libnvdimm/region: Fix build error
  libnvdimm/region: Replace zero-length array with flexible-array member
  libnvdimm/label: Replace zero-length array with flexible-array member
  ACPI: NFIT: Replace zero-length array with flexible-array member
  libnvdimm/region: Introduce an 'align' attribute
  libnvdimm/region: Introduce NDD_LABELING
  libnvdimm/namespace: Enforce memremap_compat_align()
  libnvdimm/pfn: Prevent raw mode fallback if pfn-infoblock valid
  libnvdimm: Out of bounds read in __nd_ioctl()
  acpi/nfit: improve bounds checking for 'func'
  mm/memremap_pages: Introduce memremap_compat_align()
  ...
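Driver-facing shape of the new dax operation, for context: per the dax,
pmem, dm, and dcssblk commits listed above, struct dax_operations gains a
->zero_page_range() hook and alloc_dax() now requires it. The sketch below
is illustrative only, assuming the 5.7 signature and a hypothetical "foo"
driver; it zeroes through dax_direct_access() and writes back with
dax_flush(), roughly the pattern the dcssblk change follows, and is not
code from this series:

/*
 * Illustrative sketch (hypothetical "foo" driver, not from this series):
 * a minimal ->zero_page_range() that resolves the mapping, zeroes it,
 * and flushes so the stores reach the persistence domain.
 */
static int foo_dax_zero_page_range(struct dax_device *dax_dev,
				   pgoff_t pgoff, size_t nr_pages)
{
	void *kaddr;
	long rc;

	/* Resolve a kernel mapping for the requested page range. */
	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (rc < 0)
		return rc;
	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations foo_dax_ops = {
	.direct_access		= foo_dax_direct_access,	/* placeholder */
	.copy_from_iter		= foo_dax_copy_from_iter,	/* placeholder */
	.copy_to_iter		= foo_dax_copy_to_iter,		/* placeholder */
	.zero_page_range	= foo_dax_zero_page_range,
};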
@@ -360,7 +360,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle)
 
 static u8 nfit_dsm_revid(unsigned family, unsigned func)
 {
-	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
+	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
 		[NVDIMM_FAMILY_INTEL] = {
 			[NVDIMM_INTEL_GET_MODES] = 2,
 			[NVDIMM_INTEL_GET_FWINFO] = 2,
@@ -386,7 +386,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
 
 	if (family > NVDIMM_FAMILY_MAX)
 		return 0;
-	if (func > 31)
+	if (func > NVDIMM_CMD_MAX)
 		return 0;
 	id = revid_table[family][func];
 	if (id == 0)
@@ -492,7 +492,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	 * Check for a valid command. For ND_CMD_CALL, we also have to
 	 * make sure that the DSM function is supported.
 	 */
-	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+	if (cmd == ND_CMD_CALL &&
+	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
 		return -ENOTTY;
 	else if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
@@ -2026,8 +2027,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 			continue;
 		}
 
-		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
+		if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
 			set_bit(NDD_ALIASING, &flags);
+			set_bit(NDD_LABELING, &flags);
+		}
 
 		/* collate flags across all memdevs for this dimm */
 		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -3492,7 +3495,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 	if (nvdimm && cmd == ND_CMD_CALL &&
 			call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
 		func = call_pkg->nd_command;
-		if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+		if (func > NVDIMM_CMD_MAX ||
+		    ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK))
 			return -EOPNOTSUPP;
 	}
 
@@ -34,6 +34,7 @@
 		| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
 
 #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
+#define NVDIMM_CMD_MAX 31
 
 #define NVDIMM_STANDARD_CMDMASK \
 (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -144,32 +145,32 @@ struct nfit_spa {
 	unsigned long ars_state;
 	u32 clear_err_unit;
 	u32 max_ars;
-	struct acpi_nfit_system_address spa[0];
+	struct acpi_nfit_system_address spa[];
 };
 
 struct nfit_dcr {
 	struct list_head list;
-	struct acpi_nfit_control_region dcr[0];
+	struct acpi_nfit_control_region dcr[];
 };
 
 struct nfit_bdw {
 	struct list_head list;
-	struct acpi_nfit_data_region bdw[0];
+	struct acpi_nfit_data_region bdw[];
 };
 
 struct nfit_idt {
 	struct list_head list;
-	struct acpi_nfit_interleave idt[0];
+	struct acpi_nfit_interleave idt[];
 };
 
 struct nfit_flush {
 	struct list_head list;
-	struct acpi_nfit_flush_address flush[0];
+	struct acpi_nfit_flush_address flush[];
 };
 
 struct nfit_memdev {
 	struct list_head list;
-	struct acpi_nfit_memory_map memdev[0];
+	struct acpi_nfit_memory_map memdev[];
 };
 
 enum nfit_mem_flags {
@@ -72,47 +72,6 @@ int acpi_map_pxm_to_node(int pxm)
 }
 EXPORT_SYMBOL(acpi_map_pxm_to_node);
 
-/**
- * acpi_map_pxm_to_online_node - Map proximity ID to online node
- * @pxm: ACPI proximity ID
- *
- * This is similar to acpi_map_pxm_to_node(), but always returns an online
- * node. When the mapped node from a given proximity ID is offline, it
- * looks up the node distance table and returns the nearest online node.
- *
- * ACPI device drivers, which are called after the NUMA initialization has
- * completed in the kernel, can call this interface to obtain their device
- * NUMA topology from ACPI tables. Such drivers do not have to deal with
- * offline nodes. A node may be offline when a device proximity ID is
- * unique, SRAT memory entry does not exist, or NUMA is disabled, ex.
- * "numa=off" on x86.
- */
-int acpi_map_pxm_to_online_node(int pxm)
-{
-	int node, min_node;
-
-	node = acpi_map_pxm_to_node(pxm);
-
-	if (node == NUMA_NO_NODE)
-		node = 0;
-
-	min_node = node;
-	if (!node_online(node)) {
-		int min_dist = INT_MAX, dist, n;
-
-		for_each_online_node(n) {
-			dist = node_distance(node, n);
-			if (dist < min_dist) {
-				min_dist = dist;
-				min_node = n;
-			}
-		}
-	}
-
-	return min_node;
-}
-EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
-
 static void __init
 acpi_table_print_srat_entry(struct acpi_subtable_header *header)
 {
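For context on the NVDIMM_CMD_MAX hunks above: 'func' arrives from
userspace through the ND_CMD_CALL ioctl path, and both
test_bit(func, &dsm_mask) and '1 << func' are only well-defined while
func stays within a single word, hence the cap at 31. A stand-alone
sketch of the same check pattern (illustrative, not kernel code):

/* Reject out-of-range functions before any bit test or shift. */
#define NVDIMM_CMD_MAX	31	/* highest command bit the nfit masks define */

static bool dsm_func_supported(unsigned long dsm_mask, unsigned int func)
{
	if (func > NVDIMM_CMD_MAX)
		return false;
	return dsm_mask & (1UL << func);
}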