
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2019 Helge Deller <[email protected]>
 * (C) Copyright 2005 Kyle McMartin <[email protected]>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
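/*
 * Worked example (hypothetical addresses): ioremap(0xf0001234, 0x100)
 * rounds the request down to the page boundary at physical 0xf0001000,
 * maps whole pages covering the requested range, and returns the new
 * virtual base plus the 0x234 offset, so the caller's pointer refers to
 * exactly the byte it asked for.
 */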
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;

	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
	    (phys_addr >= 0x00500000 && end < 0x03bfffff))
		phys_addr |= F_EXTEND(0xfc000000);
#endif

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}

	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
			  _PAGE_ACCESSED | _PAGE_NO_CACHE);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(ioremap);

void iounmap(const volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
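
/*
 * Usage sketch (illustrative only, kept compiled out): the function name,
 * the bus address and the register offsets below are hypothetical.  A
 * driver maps its MMIO window once, accesses registers through
 * readl()/writel(), and tears the mapping down again with iounmap().
 */
#if 0
static int example_device_init(void)
{
	void __iomem *regs;

	/* Map one page of device registers at a made-up bus address. */
	regs = ioremap(0xf4008000UL, PAGE_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical enable register */
	(void)readl(regs + 0x14);	/* hypothetical status register */

	/* iounmap() accepts the same cookie ioremap() returned. */
	iounmap(regs);
	return 0;
}
#endif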