  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2021 Intel Corporation
  4. * Author: Johannes Berg <[email protected]>
  5. */
  6. #include <linux/types.h>
  7. #include <linux/slab.h>
  8. #include <linux/logic_iomem.h>
  9. #include <asm/io.h>
/*
 * One registered emulated I/O memory region: the resource describing the
 * physical range it covers, the ops used to map pieces of it, and the
 * node linking it into regions_list.
 */
struct logic_iomem_region {
	const struct resource *res;
	const struct logic_iomem_region_ops *ops;
	struct list_head list;
};
/*
 * One currently mapped area slot; ops/priv are filled in by the owning
 * region's map() callback. A NULL ops pointer marks the slot as free.
 */
struct logic_iomem_area {
	const struct logic_iomem_ops *ops;
	void *priv;
};
/* Each mapped area covers at most 16 MiB (1 << 24 bytes). */
#define AREA_SHIFT 24
#define MAX_AREA_SIZE (1 << AREA_SHIFT)
/* Number of area slots fitting below the 2 GiB fake-address span. */
#define MAX_AREAS ((1U << 31) / MAX_AREA_SIZE)
/* Fake-address bits selecting the area slot index. */
#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
/* Fake-address bits forming the offset within an area. */
#define AREA_MASK (MAX_AREA_SIZE - 1)
#ifdef CONFIG_64BIT
/* Recognizable poison bias: stray direct dereferences fault visibly. */
#define IOREMAP_BIAS 0xDEAD000000000000UL
#define IOREMAP_MASK 0xFFFFFFFF00000000UL
#else
#define IOREMAP_BIAS 0x80000000UL
#define IOREMAP_MASK 0x80000000UL
#endif

/* Protects regions_list and the mapped_areas slot table. */
static DEFINE_MUTEX(regions_mtx);
static LIST_HEAD(regions_list);
static struct logic_iomem_area mapped_areas[MAX_AREAS];
  34. int logic_iomem_add_region(struct resource *resource,
  35. const struct logic_iomem_region_ops *ops)
  36. {
  37. struct logic_iomem_region *rreg;
  38. int err;
  39. if (WARN_ON(!resource || !ops))
  40. return -EINVAL;
  41. if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
  42. return -EINVAL;
  43. rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
  44. if (!rreg)
  45. return -ENOMEM;
  46. err = request_resource(&iomem_resource, resource);
  47. if (err) {
  48. kfree(rreg);
  49. return -ENOMEM;
  50. }
  51. mutex_lock(&regions_mtx);
  52. rreg->res = resource;
  53. rreg->ops = ops;
  54. list_add_tail(&rreg->list, &regions_list);
  55. mutex_unlock(&regions_mtx);
  56. return 0;
  57. }
  58. EXPORT_SYMBOL(logic_iomem_add_region);
#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
/*
 * Without a fallback implementation, an ioremap() of an address that no
 * registered region covers is a bug: warn and return NULL.
 */
static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
{
	WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
	     (unsigned long long)offset, size);
	return NULL;
}

/* Likewise for iounmap() of an address this code never handed out. */
static void real_iounmap(volatile void __iomem *addr)
{
	WARN(1, "invalid iounmap for addr 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
  72. void __iomem *ioremap(phys_addr_t offset, size_t size)
  73. {
  74. void __iomem *ret = NULL;
  75. struct logic_iomem_region *rreg, *found = NULL;
  76. int i;
  77. mutex_lock(&regions_mtx);
  78. list_for_each_entry(rreg, &regions_list, list) {
  79. if (rreg->res->start > offset)
  80. continue;
  81. if (rreg->res->end < offset + size - 1)
  82. continue;
  83. found = rreg;
  84. break;
  85. }
  86. if (!found)
  87. goto out;
  88. for (i = 0; i < MAX_AREAS; i++) {
  89. long offs;
  90. if (mapped_areas[i].ops)
  91. continue;
  92. offs = rreg->ops->map(offset - found->res->start,
  93. size, &mapped_areas[i].ops,
  94. &mapped_areas[i].priv);
  95. if (offs < 0) {
  96. mapped_areas[i].ops = NULL;
  97. break;
  98. }
  99. if (WARN_ON(!mapped_areas[i].ops)) {
  100. mapped_areas[i].ops = NULL;
  101. break;
  102. }
  103. ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
  104. break;
  105. }
  106. out:
  107. mutex_unlock(&regions_mtx);
  108. if (ret)
  109. return ret;
  110. return real_ioremap(offset, size);
  111. }
  112. EXPORT_SYMBOL(ioremap);
  113. static inline struct logic_iomem_area *
  114. get_area(const volatile void __iomem *addr)
  115. {
  116. unsigned long a = (unsigned long)addr;
  117. unsigned int idx;
  118. if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
  119. return NULL;
  120. idx = (a & AREA_BITS) >> AREA_SHIFT;
  121. if (mapped_areas[idx].ops)
  122. return &mapped_areas[idx];
  123. return NULL;
  124. }
  125. void iounmap(volatile void __iomem *addr)
  126. {
  127. struct logic_iomem_area *area = get_area(addr);
  128. if (!area) {
  129. real_iounmap(addr);
  130. return;
  131. }
  132. if (area->ops->unmap)
  133. area->ops->unmap(area->priv);
  134. mutex_lock(&regions_mtx);
  135. area->ops = NULL;
  136. area->priv = NULL;
  137. mutex_unlock(&regions_mtx);
  138. }
  139. EXPORT_SYMBOL(iounmap);
  140. #ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
  141. #define MAKE_FALLBACK(op, sz) \
  142. static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
  143. { \
  144. WARN(1, "Invalid read" #op " at address %llx\n", \
  145. (unsigned long long)(uintptr_t __force)addr); \
  146. return (u ## sz)~0ULL; \
  147. } \
  148. \
  149. static void real_raw_write ## op(u ## sz val, \
  150. volatile void __iomem *addr) \
  151. { \
  152. WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
  153. (unsigned long long)val, \
  154. (unsigned long long)(uintptr_t __force)addr);\
  155. } \
  156. MAKE_FALLBACK(b, 8);
  157. MAKE_FALLBACK(w, 16);
  158. MAKE_FALLBACK(l, 32);
  159. #ifdef CONFIG_64BIT
  160. MAKE_FALLBACK(q, 64);
  161. #endif
/* Fallback for memset_io() on an unrecognized address: warn only. */
static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
{
	WARN(1, "Invalid memset_io at address 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);
}
/*
 * Fallback for memcpy_fromio() on an unrecognized address: warn and
 * fill the destination with 0xff, mimicking a failed bus read.
 */
static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
			       size_t size)
{
	WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);

	memset(buffer, 0xff, size);
}
/* Fallback for memcpy_toio() on an unrecognized address: warn, drop data. */
static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
			     size_t size)
{
	WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
/*
 * Generate the __raw_read{b,w,l,q} / __raw_write{b,w,l,q} accessors:
 * look up the area slot for the fake address and dispatch to its
 * read/write ops with the in-area offset (addr & AREA_MASK) and the
 * access width in bytes (sz / 8); unrecognized addresses are routed to
 * the real_raw_* fallbacks.
 */
#define MAKE_OP(op, sz) 						\
u##sz __raw_read ## op(const volatile void __iomem *addr)		\
{									\
	struct logic_iomem_area *area = get_area(addr);			\
									\
	if (!area)							\
		return real_raw_read ## op(addr);			\
									\
	return (u ## sz) area->ops->read(area->priv,			\
					 (unsigned long)addr & AREA_MASK,\
					 sz / 8);			\
}									\
EXPORT_SYMBOL(__raw_read ## op); \
									\
void __raw_write ## op(u ## sz val, volatile void __iomem *addr)	\
{									\
	struct logic_iomem_area *area = get_area(addr);			\
									\
	if (!area) {							\
		real_raw_write ## op(val, addr);			\
		return;							\
	}								\
									\
	area->ops->write(area->priv,					\
			 (unsigned long)addr & AREA_MASK,		\
			 sz / 8, val);					\
}									\
EXPORT_SYMBOL(__raw_write ## op)

MAKE_OP(b, 8);
MAKE_OP(w, 16);
MAKE_OP(l, 32);
#ifdef CONFIG_64BIT
MAKE_OP(q, 64);
#endif
  215. void memset_io(volatile void __iomem *addr, int value, size_t size)
  216. {
  217. struct logic_iomem_area *area = get_area(addr);
  218. unsigned long offs, start;
  219. if (!area) {
  220. real_memset_io(addr, value, size);
  221. return;
  222. }
  223. start = (unsigned long)addr & AREA_MASK;
  224. if (area->ops->set) {
  225. area->ops->set(area->priv, start, value, size);
  226. return;
  227. }
  228. for (offs = 0; offs < size; offs++)
  229. area->ops->write(area->priv, start + offs, 1, value);
  230. }
  231. EXPORT_SYMBOL(memset_io);
  232. void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
  233. size_t size)
  234. {
  235. struct logic_iomem_area *area = get_area(addr);
  236. u8 *buf = buffer;
  237. unsigned long offs, start;
  238. if (!area) {
  239. real_memcpy_fromio(buffer, addr, size);
  240. return;
  241. }
  242. start = (unsigned long)addr & AREA_MASK;
  243. if (area->ops->copy_from) {
  244. area->ops->copy_from(area->priv, buffer, start, size);
  245. return;
  246. }
  247. for (offs = 0; offs < size; offs++)
  248. buf[offs] = area->ops->read(area->priv, start + offs, 1);
  249. }
  250. EXPORT_SYMBOL(memcpy_fromio);
  251. void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
  252. {
  253. struct logic_iomem_area *area = get_area(addr);
  254. const u8 *buf = buffer;
  255. unsigned long offs, start;
  256. if (!area) {
  257. real_memcpy_toio(addr, buffer, size);
  258. return;
  259. }
  260. start = (unsigned long)addr & AREA_MASK;
  261. if (area->ops->copy_to) {
  262. area->ops->copy_to(area->priv, start, buffer, size);
  263. return;
  264. }
  265. for (offs = 0; offs < size; offs++)
  266. area->ops->write(area->priv, start + offs, 1, buf[offs]);
  267. }
  268. EXPORT_SYMBOL(memcpy_toio);