  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * drivers/mfd/mfd-core.c
  4. *
  5. * core MFD support
  6. * Copyright (c) 2006 Ian Molton
  7. * Copyright (c) 2007,2008 Dmitry Baryshkov
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/acpi.h>
  12. #include <linux/list.h>
  13. #include <linux/property.h>
  14. #include <linux/mfd/core.h>
  15. #include <linux/pm_runtime.h>
  16. #include <linux/slab.h>
  17. #include <linux/module.h>
  18. #include <linux/irqdomain.h>
  19. #include <linux/of.h>
  20. #include <linux/of_address.h>
  21. #include <linux/regulator/consumer.h>
/* Tracks which OF nodes have already been assigned to an MFD child device */
static LIST_HEAD(mfd_of_node_list);

/* One entry per OF node claimed by a child; removed when the child goes away */
struct mfd_of_node_entry {
	struct list_head list;
	struct device *dev;	/* the child platform device owning @np */
	struct device_node *np;
};

/* Marks children created by this core so removal can recognise them */
static struct device_type mfd_dev_type = {
	.name	= "mfd_device",
};
  31. int mfd_cell_enable(struct platform_device *pdev)
  32. {
  33. const struct mfd_cell *cell = mfd_get_cell(pdev);
  34. if (!cell->enable) {
  35. dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
  36. return 0;
  37. }
  38. return cell->enable(pdev);
  39. }
  40. EXPORT_SYMBOL(mfd_cell_enable);
  41. int mfd_cell_disable(struct platform_device *pdev)
  42. {
  43. const struct mfd_cell *cell = mfd_get_cell(pdev);
  44. if (!cell->disable) {
  45. dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
  46. return 0;
  47. }
  48. return cell->disable(pdev);
  49. }
  50. EXPORT_SYMBOL(mfd_cell_disable);
#if IS_ENABLED(CONFIG_ACPI)
/* Carries the ID table to match and, on success, the matched ACPI device. */
struct match_ids_walk_data {
	struct acpi_device_id *ids;
	struct acpi_device *adev;
};

/*
 * acpi_dev_for_each_child() callback: record the first child whose
 * _HID/_CID matches wd->ids, and return non-zero to stop the walk.
 */
static int match_device_ids(struct acpi_device *adev, void *data)
{
	struct match_ids_walk_data *wd = data;

	if (!acpi_match_device_ids(adev, wd->ids)) {
		wd->adev = adev;
		return 1;
	}

	return 0;
}

/* Attach an ACPI companion to the MFD child, if one can be located. */
static void mfd_acpi_add_device(const struct mfd_cell *cell,
				struct platform_device *pdev)
{
	const struct mfd_cell_acpi_match *match = cell->acpi_match;
	struct acpi_device *adev = NULL;
	struct acpi_device *parent;

	parent = ACPI_COMPANION(pdev->dev.parent);
	if (!parent)
		return;

	/*
	 * The MFD child device gets its ACPI handle either from the ACPI
	 * device directly under the parent that matches either _HID or
	 * _CID, or _ADR, or it will use the parent handle if no ID is
	 * given.
	 *
	 * Note that use of _ADR is a grey area in the ACPI specification,
	 * though at least Intel Galileo Gen 2 is using it to distinguish
	 * the children devices.
	 */
	if (match) {
		if (match->pnpid) {
			/* ids[1] stays zeroed and terminates the table */
			struct acpi_device_id ids[2] = {};
			struct match_ids_walk_data wd = {
				.adev = NULL,
				.ids = ids,
			};

			strscpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
			acpi_dev_for_each_child(parent, match_device_ids, &wd);
			adev = wd.adev;
		} else {
			adev = acpi_find_child_device(parent, match->adr, false);
		}
	}

	/* Fall back to the parent's companion when no child matched */
	ACPI_COMPANION_SET(&pdev->dev, adev ?: parent);
}
#else
static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
				       struct platform_device *pdev)
{
}
#endif
/*
 * Bind @np to @pdev if it is not already claimed and, when the cell sets
 * use_of_reg, only if the node's first "reg" address equals cell->of_reg.
 *
 * Returns 0 on success (or unconditionally when CONFIG_OF is disabled),
 * -EAGAIN when this node should be skipped so the caller can try the
 * next candidate, or -ENOMEM on allocation failure.
 */
static int mfd_match_of_node_to_dev(struct platform_device *pdev,
				    struct device_node *np,
				    const struct mfd_cell *cell)
{
#if IS_ENABLED(CONFIG_OF)
	struct mfd_of_node_entry *of_entry;
	const __be32 *reg;
	u64 of_node_addr;

	/* Skip if OF node has previously been allocated to a device */
	list_for_each_entry(of_entry, &mfd_of_node_list, list)
		if (of_entry->np == np)
			return -EAGAIN;

	if (!cell->use_of_reg)
		/* No of_reg defined - allocate first free compatible match */
		goto allocate_of_node;

	/* We only care about each node's first defined address */
	reg = of_get_address(np, 0, NULL, NULL);
	if (!reg)
		/* OF node does not contain a 'reg' property to match to */
		return -EAGAIN;

	of_node_addr = of_read_number(reg, of_n_addr_cells(np));

	if (cell->of_reg != of_node_addr)
		/* No match */
		return -EAGAIN;

allocate_of_node:
	/* Record the claim so later cells skip this node (see list scan above) */
	of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL);
	if (!of_entry)
		return -ENOMEM;

	of_entry->dev = &pdev->dev;
	of_entry->np = np;
	list_add_tail(&of_entry->list, &mfd_of_node_list);

	pdev->dev.of_node = np;
	pdev->dev.fwnode = &np->fwnode;
#endif
	return 0;
}
/*
 * mfd_add_device - instantiate one MFD cell as a child platform device
 *
 * Duplicates @cell into the new platform device, inherits DMA settings
 * from @parent, registers regulator supply aliases, binds an OF node
 * and/or ACPI companion, translates the cell's resources against
 * @mem_base / @irq_base / @domain, and finally registers the device.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound by the goto ladder at the bottom (each
 * label releases what was acquired after the one below it).
 */
static int mfd_add_device(struct device *parent, int id,
			  const struct mfd_cell *cell,
			  struct resource *mem_base,
			  int irq_base, struct irq_domain *domain)
{
	struct resource *res;
	struct platform_device *pdev;
	struct device_node *np = NULL;
	struct mfd_of_node_entry *of_entry, *tmp;
	bool disabled = false;
	int ret = -ENOMEM;
	int platform_id;
	int r;

	/* PLATFORM_DEVID_AUTO is passed through; otherwise offset by cell id */
	if (id == PLATFORM_DEVID_AUTO)
		platform_id = id;
	else
		platform_id = id + cell->id;

	pdev = platform_device_alloc(cell->name, platform_id);
	if (!pdev)
		goto fail_alloc;

	/* Child keeps its own copy, so the caller's cell array may be temporary */
	pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
	if (!pdev->mfd_cell)
		goto fail_device;

	res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
	if (!res)
		goto fail_device;

	pdev->dev.parent = parent;
	pdev->dev.type = &mfd_dev_type;
	/* DMA capabilities are inherited from the parent device */
	pdev->dev.dma_mask = parent->dma_mask;
	pdev->dev.dma_parms = parent->dma_parms;
	pdev->dev.coherent_dma_mask = parent->coherent_dma_mask;

	ret = regulator_bulk_register_supply_alias(
			&pdev->dev, cell->parent_supplies,
			parent, cell->parent_supplies,
			cell->num_parent_supplies);
	if (ret < 0)
		goto fail_res;

	/* Try to bind a compatible OF child node of the parent to this device */
	if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
		for_each_child_of_node(parent->of_node, np) {
			if (of_device_is_compatible(np, cell->of_compatible)) {
				/* Skip 'disabled' devices */
				if (!of_device_is_available(np)) {
					disabled = true;
					continue;
				}

				ret = mfd_match_of_node_to_dev(pdev, np, cell);
				if (ret == -EAGAIN)
					continue;
				of_node_put(np);
				if (ret)
					goto fail_alias;

				goto match;
			}
		}

		if (disabled) {
			/* Ignore 'disabled' devices error free */
			ret = 0;
			goto fail_alias;
		}

match:
		if (!pdev->dev.of_node)
			pr_warn("%s: Failed to locate of_node [id: %d]\n",
				cell->name, platform_id);
	}

	mfd_acpi_add_device(cell, pdev);

	if (cell->pdata_size) {
		ret = platform_device_add_data(pdev,
					cell->platform_data, cell->pdata_size);
		if (ret)
			goto fail_of_entry;
	}

	if (cell->swnode) {
		ret = device_add_software_node(&pdev->dev, cell->swnode);
		if (ret)
			goto fail_of_entry;
	}

	/* Translate each cell resource into the child's resource table */
	for (r = 0; r < cell->num_resources; r++) {
		res[r].name = cell->resources[r].name;
		res[r].flags = cell->resources[r].flags;

		/* Find out base to use */
		if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
			/* MEM resources are offsets within the parent's range */
			res[r].parent = mem_base;
			res[r].start = mem_base->start +
				cell->resources[r].start;
			res[r].end = mem_base->start +
				cell->resources[r].end;
		} else if (cell->resources[r].flags & IORESOURCE_IRQ) {
			if (domain) {
				/* Unable to create mappings for IRQ ranges. */
				WARN_ON(cell->resources[r].start !=
					cell->resources[r].end);
				res[r].start = res[r].end = irq_create_mapping(
					domain, cell->resources[r].start);
			} else {
				res[r].start = irq_base +
					cell->resources[r].start;
				res[r].end = irq_base +
					cell->resources[r].end;
			}
		} else {
			/* Other resource types are passed through untranslated */
			res[r].parent = cell->resources[r].parent;
			res[r].start = cell->resources[r].start;
			res[r].end = cell->resources[r].end;
		}

		if (!cell->ignore_resource_conflicts) {
			if (has_acpi_companion(&pdev->dev)) {
				ret = acpi_check_resource_conflict(&res[r]);
				if (ret)
					goto fail_res_conflict;
			}
		}
	}

	ret = platform_device_add_resources(pdev, res, cell->num_resources);
	if (ret)
		goto fail_res_conflict;

	ret = platform_device_add(pdev);
	if (ret)
		goto fail_res_conflict;

	if (cell->pm_runtime_no_callbacks)
		pm_runtime_no_callbacks(&pdev->dev);

	/* res was copied by platform_device_add_resources(); our copy can go */
	kfree(res);

	return 0;

fail_res_conflict:
	if (cell->swnode)
		device_remove_software_node(&pdev->dev);
fail_of_entry:
	/* Drop any OF-node claim recorded by mfd_match_of_node_to_dev() */
	list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
		if (of_entry->dev == &pdev->dev) {
			list_del(&of_entry->list);
			kfree(of_entry);
		}
fail_alias:
	regulator_bulk_unregister_supply_alias(&pdev->dev,
					       cell->parent_supplies,
					       cell->num_parent_supplies);
fail_res:
	kfree(res);
fail_device:
	platform_device_put(pdev);
fail_alloc:
	return ret;
}
  283. /**
  284. * mfd_add_devices - register child devices
  285. *
  286. * @parent: Pointer to parent device.
  287. * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care
  288. * of device numbering, or will be added to a device's cell_id.
  289. * @cells: Array of (struct mfd_cell)s describing child devices.
  290. * @n_devs: Number of child devices to register.
  291. * @mem_base: Parent register range resource for child devices.
  292. * @irq_base: Base of the range of virtual interrupt numbers allocated for
  293. * this MFD device. Unused if @domain is specified.
  294. * @domain: Interrupt domain to create mappings for hardware interrupts.
  295. */
  296. int mfd_add_devices(struct device *parent, int id,
  297. const struct mfd_cell *cells, int n_devs,
  298. struct resource *mem_base,
  299. int irq_base, struct irq_domain *domain)
  300. {
  301. int i;
  302. int ret;
  303. for (i = 0; i < n_devs; i++) {
  304. ret = mfd_add_device(parent, id, cells + i, mem_base,
  305. irq_base, domain);
  306. if (ret)
  307. goto fail;
  308. }
  309. return 0;
  310. fail:
  311. if (i)
  312. mfd_remove_devices(parent);
  313. return ret;
  314. }
  315. EXPORT_SYMBOL(mfd_add_devices);
/*
 * device_for_each_child_reverse() callback removing one MFD child.
 *
 * @data optionally points to an int dependency level: children whose
 * cell->level is above it are left in place so they can be removed in a
 * later pass (see mfd_remove_devices_late()).  Children of @dev's parent
 * that were not created by this core are ignored.
 */
static int mfd_remove_devices_fn(struct device *dev, void *data)
{
	struct platform_device *pdev;
	const struct mfd_cell *cell;
	struct mfd_of_node_entry *of_entry, *tmp;
	int *level = data;

	/* Only touch devices created through mfd_add_device() */
	if (dev->type != &mfd_dev_type)
		return 0;

	pdev = to_platform_device(dev);
	cell = mfd_get_cell(pdev);

	if (level && cell->level > *level)
		return 0;

	if (cell->swnode)
		device_remove_software_node(&pdev->dev);

	/* Release the OF node claim so a future probe can bind it again */
	list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
		if (of_entry->dev == &pdev->dev) {
			list_del(&of_entry->list);
			kfree(of_entry);
		}

	regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
					       cell->num_parent_supplies);

	platform_device_unregister(pdev);
	return 0;
}
  340. void mfd_remove_devices_late(struct device *parent)
  341. {
  342. int level = MFD_DEP_LEVEL_HIGH;
  343. device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
  344. }
  345. EXPORT_SYMBOL(mfd_remove_devices_late);
  346. void mfd_remove_devices(struct device *parent)
  347. {
  348. int level = MFD_DEP_LEVEL_NORMAL;
  349. device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
  350. }
  351. EXPORT_SYMBOL(mfd_remove_devices);
/* devres destructor: removes all MFD children when the parent unbinds */
static void devm_mfd_dev_release(struct device *dev, void *res)
{
	mfd_remove_devices(dev);
}
  356. /**
  357. * devm_mfd_add_devices - Resource managed version of mfd_add_devices()
  358. *
  359. * Returns 0 on success or an appropriate negative error number on failure.
  360. * All child-devices of the MFD will automatically be removed when it gets
  361. * unbinded.
  362. *
  363. * @dev: Pointer to parent device.
  364. * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care
  365. * of device numbering, or will be added to a device's cell_id.
  366. * @cells: Array of (struct mfd_cell)s describing child devices.
  367. * @n_devs: Number of child devices to register.
  368. * @mem_base: Parent register range resource for child devices.
  369. * @irq_base: Base of the range of virtual interrupt numbers allocated for
  370. * this MFD device. Unused if @domain is specified.
  371. * @domain: Interrupt domain to create mappings for hardware interrupts.
  372. */
  373. int devm_mfd_add_devices(struct device *dev, int id,
  374. const struct mfd_cell *cells, int n_devs,
  375. struct resource *mem_base,
  376. int irq_base, struct irq_domain *domain)
  377. {
  378. struct device **ptr;
  379. int ret;
  380. ptr = devres_alloc(devm_mfd_dev_release, sizeof(*ptr), GFP_KERNEL);
  381. if (!ptr)
  382. return -ENOMEM;
  383. ret = mfd_add_devices(dev, id, cells, n_devs, mem_base,
  384. irq_base, domain);
  385. if (ret < 0) {
  386. devres_free(ptr);
  387. return ret;
  388. }
  389. *ptr = dev;
  390. devres_add(dev, ptr);
  391. return ret;
  392. }
  393. EXPORT_SYMBOL(devm_mfd_add_devices);
  394. MODULE_LICENSE("GPL");
  395. MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");