/* ioremap.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. #include <linux/io.h>
  3. #include <linux/slab.h>
  4. #include <linux/mmzone.h>
  5. #include <linux/vmalloc.h>
  6. #include <asm/io-workarounds.h>
/*
 * Current boundary of the ioremap allocation area.
 * NOTE(review): not referenced within this file — presumably maintained by
 * __ioremap_caller / early mapping code elsewhere in the arch; confirm there.
 */
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
  9. void __iomem *ioremap(phys_addr_t addr, unsigned long size)
  10. {
  11. pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
  12. void *caller = __builtin_return_address(0);
  13. if (iowa_is_active())
  14. return iowa_ioremap(addr, size, prot, caller);
  15. return __ioremap_caller(addr, size, prot, caller);
  16. }
  17. EXPORT_SYMBOL(ioremap);
  18. void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
  19. {
  20. pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
  21. void *caller = __builtin_return_address(0);
  22. if (iowa_is_active())
  23. return iowa_ioremap(addr, size, prot, caller);
  24. return __ioremap_caller(addr, size, prot, caller);
  25. }
  26. EXPORT_SYMBOL(ioremap_wc);
  27. void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
  28. {
  29. pgprot_t prot = pgprot_cached(PAGE_KERNEL);
  30. void *caller = __builtin_return_address(0);
  31. if (iowa_is_active())
  32. return iowa_ioremap(addr, size, prot, caller);
  33. return __ioremap_caller(addr, size, prot, caller);
  34. }
  35. void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
  36. {
  37. pte_t pte = __pte(flags);
  38. void *caller = __builtin_return_address(0);
  39. /* writeable implies dirty for kernel addresses */
  40. if (pte_write(pte))
  41. pte = pte_mkdirty(pte);
  42. /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
  43. pte = pte_exprotect(pte);
  44. pte = pte_mkprivileged(pte);
  45. if (iowa_is_active())
  46. return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
  47. return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
  48. }
  49. EXPORT_SYMBOL(ioremap_prot);
  50. int early_ioremap_range(unsigned long ea, phys_addr_t pa,
  51. unsigned long size, pgprot_t prot)
  52. {
  53. unsigned long i;
  54. for (i = 0; i < size; i += PAGE_SIZE) {
  55. int err = map_kernel_page(ea + i, pa + i, prot);
  56. if (WARN_ON_ONCE(err)) /* Should clean up */
  57. return err;
  58. }
  59. return 0;
  60. }
  61. void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
  62. pgprot_t prot, void *caller)
  63. {
  64. struct vm_struct *area;
  65. int ret;
  66. unsigned long va;
  67. area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
  68. if (area == NULL)
  69. return NULL;
  70. area->phys_addr = pa;
  71. va = (unsigned long)area->addr;
  72. ret = ioremap_page_range(va, va + size, pa, prot);
  73. if (!ret)
  74. return (void __iomem *)area->addr + offset;
  75. vunmap_range(va, va + size);
  76. free_vm_area(area);
  77. return NULL;
  78. }