// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>
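
/*
 * Check whether a physical address lies in a region the hardware already
 * treats as uncached: the fixed ARC_UNCACHED_ADDR_SPACE window on
 * ARCompact, or the [perip_base, perip_end] peripheral range otherwise.
 * Such addresses can be used 1:1, with no MMU mapping needed.
 */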
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact()) {
		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
			return true;
	} else if (paddr >= perip_base && paddr <= perip_end) {
		return true;
	}

	return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	phys_addr_t end;

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/*
	 * If the region is h/w uncached, the MMU mapping can be elided as an
	 * optimization. The cast to u32 is fine as this region can only be
	 * inside 4GB.
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size,
			    pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);
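
/*
 * Typical use, as an illustrative sketch only (the device base 0xf0001000
 * and register offset 0x10 below are made up, not from this file):
 *
 *	void __iomem *regs = ioremap(0xf0001000, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(readl(regs + 0x10) | BIT(0), regs + 0x10);
 *	iounmap(regs);
 */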

/*
 * ioremap with explicit access flags.
 *
 * Cache-semantics-wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for
 * addresses in the ARC hardware-uncached region, this one still goes
 * through the MMU, as the caller might need finer access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	unsigned int off;
	unsigned long vaddr;
	struct vm_struct *area;
	phys_addr_t end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound, zero size */
	end = paddr + size - 1;
	if ((!size) || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK_PHYS;
	size = PAGE_ALIGN(end + 1) - paddr;
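
	/*
	 * Worked example of the alignment above, assuming 4K pages:
	 * a request for paddr = 0x10001234, size = 0x100 gives
	 * off = 0x234, paddr = 0x10001000 and, since end = 0x10001333,
	 * size = PAGE_ALIGN(0x10001334) - 0x10001000 = 0x1000.
	 */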

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
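
/*
 * Illustrative call (a sketch, not from this file; the base address is
 * made up). Since ioremap_prot() forces the mapping uncached itself,
 * passing plain PAGE_KERNEL protections still yields an uncached mapping:
 *
 *	void __iomem *p = ioremap_prot(0xf0002000, SZ_4K,
 *				       pgprot_val(PAGE_KERNEL));
 */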

/*
 * Undo a mapping set up by ioremap()/ioremap_prot(). Addresses in the
 * hardware-uncached region were handed out 1:1 without a vm_area, so for
 * those there is nothing to free.
 */
void iounmap(const volatile void __iomem *addr)
{
	/* weird double cast to handle phys_addr_t > 32 bits */
	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
		return;

	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);