  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright © 2008 Keith Packard <[email protected]>
  4. */
  5. #ifndef _LINUX_IO_MAPPING_H
  6. #define _LINUX_IO_MAPPING_H
  7. #include <linux/types.h>
  8. #include <linux/slab.h>
  9. #include <linux/bug.h>
  10. #include <linux/io.h>
  11. #include <linux/pgtable.h>
  12. #include <asm/page.h>
  13. /*
  14. * The io_mapping mechanism provides an abstraction for mapping
  15. * individual pages from an io device to the CPU in an efficient fashion.
  16. *
  17. * See Documentation/driver-api/io-mapping.rst
  18. */
struct io_mapping {
	resource_size_t base;	/* bus/resource base address of the region */
	unsigned long size;	/* length of the region in bytes */
	pgprot_t prot;		/* protection bits used for per-page mappings */
	void __iomem *iomem;	/* whole-range mapping; only set on the
				 * !CONFIG_HAVE_ATOMIC_IOMAP path below */
};
  25. #ifdef CONFIG_HAVE_ATOMIC_IOMAP
  26. #include <linux/pfn.h>
  27. #include <asm/iomap.h>
  28. /*
  29. * For small address space machines, mapping large objects
  30. * into the kernel virtual space isn't practical. Where
  31. * available, use fixmap support to dynamically map pages
  32. * of the object at run time.
  33. */
  34. static inline struct io_mapping *
  35. io_mapping_init_wc(struct io_mapping *iomap,
  36. resource_size_t base,
  37. unsigned long size)
  38. {
  39. pgprot_t prot;
  40. if (iomap_create_wc(base, size, &prot))
  41. return NULL;
  42. iomap->base = base;
  43. iomap->size = size;
  44. iomap->prot = prot;
  45. return iomap;
  46. }
/* Tear down a mapping set up with io_mapping_init_wc(). */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
/* Atomic map/unmap */

/*
 * Map a single page at @offset with the mapping's write-combining
 * protection.  Preemption and pagefaults stay disabled until the
 * matching io_mapping_unmap_atomic(), so the caller must not sleep
 * in between.
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	/* An offset past the end of the mapping is a caller bug. */
	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	/* Order matters: preemption off first, then pagefaults. */
	preempt_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
/*
 * Undo io_mapping_map_atomic_wc(): drop the mapping, then re-enable
 * pagefaults and preemption in the reverse of the order they were
 * disabled.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	preempt_enable();
}
  71. static inline void __iomem *
  72. io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
  73. {
  74. resource_size_t phys_addr;
  75. BUG_ON(offset >= mapping->size);
  76. phys_addr = mapping->base + offset;
  77. return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
  78. }
/* Release a page mapped with io_mapping_map_local_wc(). */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
  83. static inline void __iomem *
  84. io_mapping_map_wc(struct io_mapping *mapping,
  85. unsigned long offset,
  86. unsigned long size)
  87. {
  88. resource_size_t phys_addr;
  89. BUG_ON(offset >= mapping->size);
  90. phys_addr = mapping->base + offset;
  91. return ioremap_wc(phys_addr, size);
  92. }
/* Release a mapping obtained from io_mapping_map_wc(). */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
  98. #else /* HAVE_ATOMIC_IOMAP */
  99. #include <linux/uaccess.h>
  100. /* Create the io_mapping object*/
  101. static inline struct io_mapping *
  102. io_mapping_init_wc(struct io_mapping *iomap,
  103. resource_size_t base,
  104. unsigned long size)
  105. {
  106. iomap->iomem = ioremap_wc(base, size);
  107. if (!iomap->iomem)
  108. return NULL;
  109. iomap->base = base;
  110. iomap->size = size;
  111. iomap->prot = pgprot_writecombine(PAGE_KERNEL);
  112. return iomap;
  113. }
/* Tear down the whole-range mapping created by io_mapping_init_wc(). */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
/* Non-atomic map/unmap */

/*
 * The whole range is already mapped, so this is pure pointer
 * arithmetic.  @size is unused here; the caller is responsible for
 * keeping @offset (and the span it reads) within the mapping.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}
/* Nothing to undo: io_mapping_map_wc() did not create a new mapping. */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
/* Atomic map/unmap */

/*
 * Same result as io_mapping_map_wc(), but also disables preemption
 * and pagefaults so callers get the same context rules as the
 * CONFIG_HAVE_ATOMIC_IOMAP variant.  Order matters: preemption off
 * first, then pagefaults.
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
/*
 * Undo io_mapping_map_atomic_wc(): unmap (a no-op on this path),
 * then re-enable pagefaults and preemption in reverse order.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
  147. static inline void __iomem *
  148. io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
  149. {
  150. return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
  151. }
/* Counterpart of io_mapping_map_local_wc(); nothing to release here. */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}
  156. #endif /* !HAVE_ATOMIC_IOMAP */
  157. static inline struct io_mapping *
  158. io_mapping_create_wc(resource_size_t base,
  159. unsigned long size)
  160. {
  161. struct io_mapping *iomap;
  162. iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
  163. if (!iomap)
  164. return NULL;
  165. if (!io_mapping_init_wc(iomap, base, size)) {
  166. kfree(iomap);
  167. return NULL;
  168. }
  169. return iomap;
  170. }
/* Undo io_mapping_create_wc(): tear down the mapping, then free it. */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
  177. #endif /* _LINUX_IO_MAPPING_H */
  178. int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
  179. unsigned long addr, unsigned long pfn, unsigned long size);