tools/testing/nvdimm: replace CONFIG_DMA_CMA dependency with vmalloc()

DMA_CMA is incompatible with the SWIOTLB used in enterprise distro
configurations. Switch to vmalloc() allocations for all resources.

Acked-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Dan Williams
2016-06-15 20:34:17 -07:00
parent f295e53b60
commit ee8520fe8c
4 changed files with 42 additions and 48 deletions

View File

@@ -21,14 +21,26 @@ long pmem_direct_access(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_queue->queuedata;
resource_size_t offset = sector * 512 + pmem->data_offset;
/* disable DAX for nfit_test pmem devices */
if (get_nfit_res(pmem->phys_addr + offset)) {
dev_info_once(pmem->bb.dev, "dax is disabled for nfit_test\n");
return -EIO;
}
if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
return -EIO;
/*
* Limit dax to a single page at a time given vmalloc()-backed
* in the nfit_test case.
*/
if (get_nfit_res(pmem->phys_addr + offset)) {
struct page *page;
*kaddr = pmem->virt_addr + offset;
page = vmalloc_to_page(pmem->virt_addr + offset);
*pfn = page_to_pfn_t(page);
dev_dbg_ratelimited(disk_to_dev(bdev->bd_disk)->parent,
"%s: sector: %#llx pfn: %#lx\n", __func__,
(unsigned long long) sector, page_to_pfn(page));
return PAGE_SIZE;
}
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);