  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4. */
  5. #include <linux/memremap.h>
  6. #include <linux/rculist.h>
  7. #include <linux/export.h>
  8. #include <linux/ioport.h>
  9. #include <linux/module.h>
  10. #include <linux/types.h>
  11. #include <linux/pfn_t.h>
  12. #include <linux/acpi.h>
  13. #include <linux/io.h>
  14. #include <linux/mm.h>
  15. #include "nfit_test.h"
/* List of installed hook tables; readers traverse it under RCU. */
static LIST_HEAD(iomap_head);

/*
 * Hook table filled in by the nfit_test module via nfit_test_setup().
 * While registered on @iomap_head, the wrappers below consult
 * nfit_test_lookup() to decide whether an address range is emulated,
 * and evaluate_dsm() to intercept ACPI _DSM calls.
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;
	nfit_test_evaluate_dsm_fn evaluate_dsm;
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
  24. void nfit_test_setup(nfit_test_lookup_fn lookup,
  25. nfit_test_evaluate_dsm_fn evaluate)
  26. {
  27. iomap_ops.nfit_test_lookup = lookup;
  28. iomap_ops.evaluate_dsm = evaluate;
  29. list_add_rcu(&iomap_ops.list, &iomap_head);
  30. }
  31. EXPORT_SYMBOL(nfit_test_setup);
/*
 * Unpublish the hook table.  synchronize_rcu() guarantees no RCU reader
 * (get_nfit_res() / __wrap_acpi_evaluate_dsm()) can still be walking
 * @iomap_head when this returns, so the caller may safely unload.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
  38. static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
  39. {
  40. struct iomap_ops *ops;
  41. ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
  42. if (ops)
  43. return ops->nfit_test_lookup(resource);
  44. return NULL;
  45. }
  46. struct nfit_test_resource *get_nfit_res(resource_size_t resource)
  47. {
  48. struct nfit_test_resource *res;
  49. rcu_read_lock();
  50. res = __get_nfit_res(resource);
  51. rcu_read_unlock();
  52. return res;
  53. }
  54. EXPORT_SYMBOL(get_nfit_res);
/*
 * Map @offset into the test resource's backing buffer when the range is
 * emulated, otherwise call @fallback_fn for a real mapping.
 * NOTE(review): @offset is evaluated more than once — callers must pass
 * side-effect-free expressions (the two callers below do).
 */
#define __nfit_test_ioremap(offset, size, fallback_fn) ({ \
	struct nfit_test_resource *nfit_res = get_nfit_res(offset); \
	nfit_res ? \
		(void __iomem *) nfit_res->buf + (offset) \
			- nfit_res->res.start \
	: \
		fallback_fn((offset), (size)) ; \
})
  63. void __iomem *__wrap_devm_ioremap(struct device *dev,
  64. resource_size_t offset, unsigned long size)
  65. {
  66. struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  67. if (nfit_res)
  68. return (void __iomem *) nfit_res->buf + offset
  69. - nfit_res->res.start;
  70. return devm_ioremap(dev, offset, size);
  71. }
  72. EXPORT_SYMBOL(__wrap_devm_ioremap);
  73. void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
  74. size_t size, unsigned long flags)
  75. {
  76. struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  77. if (nfit_res)
  78. return nfit_res->buf + offset - nfit_res->res.start;
  79. return devm_memremap(dev, offset, size, flags);
  80. }
  81. EXPORT_SYMBOL(__wrap_devm_memremap);
/*
 * devm action for emulated page maps: kill the percpu ref, wait for the
 * release callback (dev_pagemap_percpu_release) to signal ->done, then
 * tear the ref down.  The kill/wait/exit order is required.
 */
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	WARN_ON(!pgmap);
	percpu_ref_kill(&pgmap->ref);
	wait_for_completion(&pgmap->done);
	percpu_ref_exit(&pgmap->ref);
}
/* percpu_ref release callback: wake the waiter in nfit_test_kill(). */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}
  95. void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  96. {
  97. int error;
  98. resource_size_t offset = pgmap->range.start;
  99. struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  100. if (!nfit_res)
  101. return devm_memremap_pages(dev, pgmap);
  102. init_completion(&pgmap->done);
  103. error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
  104. GFP_KERNEL);
  105. if (error)
  106. return ERR_PTR(error);
  107. error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
  108. if (error)
  109. return ERR_PTR(error);
  110. return nfit_res->buf + offset - nfit_res->res.start;
  111. }
  112. EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
  113. pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
  114. {
  115. struct nfit_test_resource *nfit_res = get_nfit_res(addr);
  116. if (nfit_res)
  117. flags &= ~PFN_MAP;
  118. return phys_to_pfn_t(addr, flags);
  119. }
  120. EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
  121. void *__wrap_memremap(resource_size_t offset, size_t size,
  122. unsigned long flags)
  123. {
  124. struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  125. if (nfit_res)
  126. return nfit_res->buf + offset - nfit_res->res.start;
  127. return memremap(offset, size, flags);
  128. }
  129. EXPORT_SYMBOL(__wrap_memremap);
  130. void __wrap_devm_memunmap(struct device *dev, void *addr)
  131. {
  132. struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
  133. if (nfit_res)
  134. return;
  135. return devm_memunmap(dev, addr);
  136. }
  137. EXPORT_SYMBOL(__wrap_devm_memunmap);
/* ioremap() wrapper: emulated ranges map to the test buffer. */
void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);
/* ioremap_wc() wrapper: same emulation path as __wrap_ioremap(). */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
  148. void __wrap_iounmap(volatile void __iomem *addr)
  149. {
  150. struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
  151. if (nfit_res)
  152. return;
  153. return iounmap(addr);
  154. }
  155. EXPORT_SYMBOL(__wrap_iounmap);
  156. void __wrap_memunmap(void *addr)
  157. {
  158. struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
  159. if (nfit_res)
  160. return;
  161. return memunmap(addr);
  162. }
  163. EXPORT_SYMBOL(__wrap_memunmap);
/* forward declaration: defined below, needed by the devres callback */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

/*
 * devres destructor for regions requested via nfit_test_request_region().
 * @data holds a struct resource *.  Re-enters the release path with
 * dev == NULL so nfit_test_release_region() drops the bookkeeping entry.
 */
static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}
  173. static int match(struct device *dev, void *__res, void *match_data)
  174. {
  175. struct resource *res = *((struct resource **) __res);
  176. resource_size_t start = *((resource_size_t *) match_data);
  177. return res->start == start;
  178. }
/*
 * Emulated backend for __release_region()/__devm_release_region().
 * Returns true when the range belongs to an emulated resource and has
 * been handled here (so the caller must NOT fall through to the real
 * release), false when the range is not emulated.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	/* only top-level iomem ranges are candidates for emulation */
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/*
				 * devm path: let devres locate and drop the
				 * matching entry; nfit_devres_release() then
				 * re-enters here with dev == NULL.
				 */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			/* unlink the request that starts at @start */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			/* a release must match the original request's size */
			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
					__func__, start, n, res);
			if (res)
				/* req still points at the unlinked entry */
				kfree(req);
			return true;
		}
	}
	return false;
}
  211. static struct resource *nfit_test_request_region(struct device *dev,
  212. struct resource *parent, resource_size_t start,
  213. resource_size_t n, const char *name, int flags)
  214. {
  215. struct nfit_test_resource *nfit_res;
  216. if (parent == &iomem_resource) {
  217. nfit_res = get_nfit_res(start);
  218. if (nfit_res) {
  219. struct nfit_test_request *req;
  220. struct resource *res = NULL;
  221. if (start + n > nfit_res->res.start
  222. + resource_size(&nfit_res->res)) {
  223. pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
  224. __func__, start, n,
  225. &nfit_res->res);
  226. return NULL;
  227. }
  228. spin_lock(&nfit_res->lock);
  229. list_for_each_entry(req, &nfit_res->requests, list)
  230. if (start == req->res.start) {
  231. res = &req->res;
  232. break;
  233. }
  234. spin_unlock(&nfit_res->lock);
  235. if (res) {
  236. WARN(1, "%pr already busy\n", res);
  237. return NULL;
  238. }
  239. req = kzalloc(sizeof(*req), GFP_KERNEL);
  240. if (!req)
  241. return NULL;
  242. INIT_LIST_HEAD(&req->list);
  243. res = &req->res;
  244. res->start = start;
  245. res->end = start + n - 1;
  246. res->name = name;
  247. res->flags = resource_type(parent);
  248. res->flags |= IORESOURCE_BUSY | flags;
  249. spin_lock(&nfit_res->lock);
  250. list_add(&req->list, &nfit_res->requests);
  251. spin_unlock(&nfit_res->lock);
  252. if (dev) {
  253. struct resource **d;
  254. d = devres_alloc(nfit_devres_release,
  255. sizeof(struct resource *),
  256. GFP_KERNEL);
  257. if (!d)
  258. return NULL;
  259. *d = res;
  260. devres_add(dev, d);
  261. }
  262. pr_debug("%s: %pr\n", __func__, res);
  263. return res;
  264. }
  265. }
  266. if (dev)
  267. return __devm_request_region(dev, parent, start, n, name);
  268. return __request_region(parent, start, n, name, flags);
  269. }
/* __request_region() wrapper: non-devm path into the emulation. */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
  277. int __wrap_insert_resource(struct resource *parent, struct resource *res)
  278. {
  279. if (get_nfit_res(res->start))
  280. return 0;
  281. return insert_resource(parent, res);
  282. }
  283. EXPORT_SYMBOL(__wrap_insert_resource);
  284. int __wrap_remove_resource(struct resource *res)
  285. {
  286. if (get_nfit_res(res->start))
  287. return 0;
  288. return remove_resource(res);
  289. }
  290. EXPORT_SYMBOL(__wrap_remove_resource);
  291. struct resource *__wrap___devm_request_region(struct device *dev,
  292. struct resource *parent, resource_size_t start,
  293. resource_size_t n, const char *name)
  294. {
  295. if (!dev)
  296. return NULL;
  297. return nfit_test_request_region(dev, parent, start, n, name, 0);
  298. }
  299. EXPORT_SYMBOL(__wrap___devm_request_region);
/* __release_region() wrapper: fall through only for non-emulated ranges. */
void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);
/* __devm_release_region() wrapper: devm variant of the above. */
void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);
  314. acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
  315. struct acpi_object_list *p, struct acpi_buffer *buf)
  316. {
  317. struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
  318. union acpi_object **obj;
  319. if (!nfit_res || strcmp(path, "_FIT") || !buf)
  320. return acpi_evaluate_object(handle, path, p, buf);
  321. obj = nfit_res->buf;
  322. buf->length = sizeof(union acpi_object);
  323. buf->pointer = *obj;
  324. return AE_OK;
  325. }
  326. EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
  327. union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
  328. u64 rev, u64 func, union acpi_object *argv4)
  329. {
  330. union acpi_object *obj = ERR_PTR(-ENXIO);
  331. struct iomap_ops *ops;
  332. rcu_read_lock();
  333. ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
  334. if (ops)
  335. obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
  336. rcu_read_unlock();
  337. if (IS_ERR(obj))
  338. return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
  339. return obj;
  340. }
  341. EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
  342. MODULE_LICENSE("GPL v2");