pmem-dax.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>

long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	/* is_bad_pmem() takes a 512-byte sector offset and a byte length */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit dax to a single page at a time given the vmalloc()-backed
	 * memory in the nfit_test case: vmalloc() pages are virtually but
	 * not physically contiguous, so only one page's worth of mapping
	 * can be handed out per call.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
	}

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
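
For context, callers do not invoke __pmem_direct_access() directly; it is reached through the dax_operations ->direct_access() callback registered for the device's dax_device. A minimal sketch of such a wrapper, modeled on the one in drivers/nvdimm/pmem.c (the function name here is illustrative and the real wrapper may differ):

	static long pmem_dax_direct_access(struct dax_device *dax_dev,
			pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
			void **kaddr, pfn_t *pfn)
	{
		/* recover the pmem_device stashed as private data at alloc_dax() time */
		struct pmem_device *pmem = dax_get_private(dax_dev);

		return __pmem_direct_access(pmem, pgoff, nr_pages, mode,
				kaddr, pfn);
	}

The nfit_test build links this file in place of drivers/nvdimm/pmem-dax.c, so mocked, vmalloc()-backed resources take the single-page get_nfit_res() path above while real pmem falls through to the normal physically contiguous path.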