// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <[email protected]>
 * Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

extern int mem_init_done;
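
/*
 * Counts the fixmap pages handed out by early (pre-mem_init) calls to
 * ioremap(); iounmap() never gives these pages back to the pool.
 */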
static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
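	/*
	 * Hypothetical worked example (not from the original source): a
	 * request for addr = 0x90000003 yields offset = 0x3 and p =
	 * 0x90000000 on a page boundary; size is rounded up to whole
	 * pages and the caller finally gets back v + 0x3.
	 */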
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;

	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
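		/*
		 * Too early for vmalloc: hand out virtual addresses from
		 * the fixmap window reserved for early ioremap() users,
		 * such as the early serial console.
		 */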
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}
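
	/*
	 * _PAGE_CI marks the mapping cache-inhibited on OpenRISC, so
	 * reads and writes go straight to the device rather than through
	 * the data cache.
	 */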
	if (ioremap_page_range(v, v + size, p,
			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr > FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is, so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush the tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i)  this code never gets called on known boards
		 *   ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}
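
	/*
	 * Otherwise the mapping came from get_vm_area(); mask off the
	 * sub-page offset that ioremap() folded into the pointer and
	 * let vfree() tear the area down.
	 */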
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);
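
/*
 * Hypothetical usage sketch, not part of the original file: a driver
 * maps a device, performs uncached MMIO accesses, then unmaps.  The
 * base address and length below are made up for illustration.
 *
 *	void __iomem *regs = ioremap(0x90000000, 0x100);
 *	if (regs) {
 *		u32 id = readl(regs);
 *		iounmap(regs);
 *	}
 */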

/*
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (the early serial console does this) and will want to alloc
 * a page for its mapping.  No userspace pages will ever get allocated
 * before memory is initialized, so this applies only to kernel pages.
 * In the event that this is called before memory is initialized, we
 * allocate the page using the memblock infrastructure.
 */
pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	} else {
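		/*
		 * The page allocator is not up yet; grab a page directly
		 * from memblock.  memblock_alloc() returns zeroed memory,
		 * matching get_zeroed_page() above.
		 */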
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
	}

	return pte;
}