platform.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * platform.c - platform 'pseudo' bus for legacy devices
  4. *
  5. * Copyright (c) 2002-3 Patrick Mochel
  6. * Copyright (c) 2002-3 Open Source Development Labs
  7. *
  8. * Please see Documentation/driver-api/driver-model/platform.rst for more
  9. * information.
  10. */
  11. #include <linux/string.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/of_device.h>
  14. #include <linux/of_irq.h>
  15. #include <linux/module.h>
  16. #include <linux/init.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/ioport.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/memblock.h>
  21. #include <linux/err.h>
  22. #include <linux/slab.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/pm_domain.h>
  25. #include <linux/idr.h>
  26. #include <linux/acpi.h>
  27. #include <linux/clk/clk-conf.h>
  28. #include <linux/limits.h>
  29. #include <linux/property.h>
  30. #include <linux/kmemleak.h>
  31. #include <linux/types.h>
  32. #include <linux/iommu.h>
  33. #include <linux/dma-map-ops.h>
  34. #include "base.h"
  35. #include "power/power.h"
/* IDA backing PLATFORM_DEVID_AUTO: automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

/* Root pseudo-device that parents all platform devices without an explicit parent */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
  42. /**
  43. * platform_get_resource - get a resource for a device
  44. * @dev: platform device
  45. * @type: resource type
  46. * @num: resource index
  47. *
  48. * Return: a pointer to the resource or NULL on failure.
  49. */
  50. struct resource *platform_get_resource(struct platform_device *dev,
  51. unsigned int type, unsigned int num)
  52. {
  53. u32 i;
  54. for (i = 0; i < dev->num_resources; i++) {
  55. struct resource *r = &dev->resource[i];
  56. if (type == resource_type(r) && num-- == 0)
  57. return r;
  58. }
  59. return NULL;
  60. }
  61. EXPORT_SYMBOL_GPL(platform_get_resource);
  62. struct resource *platform_get_mem_or_io(struct platform_device *dev,
  63. unsigned int num)
  64. {
  65. u32 i;
  66. for (i = 0; i < dev->num_resources; i++) {
  67. struct resource *r = &dev->resource[i];
  68. if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
  69. return r;
  70. }
  71. return NULL;
  72. }
  73. EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
  74. #ifdef CONFIG_HAS_IOMEM
  75. /**
  76. * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
  77. * platform device and get resource
  78. *
  79. * @pdev: platform device to use both for memory resource lookup as well as
  80. * resource management
  81. * @index: resource index
  82. * @res: optional output parameter to store a pointer to the obtained resource.
  83. *
  84. * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
  85. * on failure.
  86. */
  87. void __iomem *
  88. devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
  89. unsigned int index, struct resource **res)
  90. {
  91. struct resource *r;
  92. r = platform_get_resource(pdev, IORESOURCE_MEM, index);
  93. if (res)
  94. *res = r;
  95. return devm_ioremap_resource(&pdev->dev, r);
  96. }
  97. EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
  98. /**
  99. * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
  100. * device
  101. *
  102. * @pdev: platform device to use both for memory resource lookup as well as
  103. * resource management
  104. * @index: resource index
  105. *
  106. * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
  107. * on failure.
  108. */
  109. void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
  110. unsigned int index)
  111. {
  112. return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
  113. }
  114. EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
  115. /**
  116. * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
  117. * a platform device, retrieve the
  118. * resource by name
  119. *
  120. * @pdev: platform device to use both for memory resource lookup as well as
  121. * resource management
  122. * @name: name of the resource
  123. *
  124. * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
  125. * on failure.
  126. */
  127. void __iomem *
  128. devm_platform_ioremap_resource_byname(struct platform_device *pdev,
  129. const char *name)
  130. {
  131. struct resource *res;
  132. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  133. return devm_ioremap_resource(&pdev->dev, res);
  134. }
  135. EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
  136. #endif /* CONFIG_HAS_IOMEM */
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Prefer the devicetree mapping when the device has an OF node. */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		/* ret <= 0 (other than -EPROBE_DEFER) falls through to the
		 * resource-table lookups below. */
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		/* A disabled IRQ resource on an ACPI device means the IRQ
		 * still needs to be resolved/enabled via ACPI. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		/* IORESOURCE_IRQ encodes the IRQ number in ->start. */
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}
#endif
out_not_found:
	ret = -ENXIO;
out:
	/* IRQ 0 is reserved/invalid; never hand it to a caller. */
	if (WARN(!ret, "0 is an invalid IRQ number\n"))
		return -EINVAL;
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
  218. /**
  219. * platform_get_irq - get an IRQ for a device
  220. * @dev: platform device
  221. * @num: IRQ number index
  222. *
  223. * Gets an IRQ for a platform device and prints an error message if finding the
  224. * IRQ fails. Device drivers should check the return value for errors so as to
  225. * not pass a negative integer value to the request_irq() APIs.
  226. *
  227. * For example::
  228. *
  229. * int irq = platform_get_irq(pdev, 0);
  230. * if (irq < 0)
  231. * return irq;
  232. *
  233. * Return: non-zero IRQ number on success, negative error number on failure.
  234. */
  235. int platform_get_irq(struct platform_device *dev, unsigned int num)
  236. {
  237. int ret;
  238. ret = platform_get_irq_optional(dev, num);
  239. if (ret < 0)
  240. return dev_err_probe(&dev->dev, ret,
  241. "IRQ index %u not found\n", num);
  242. return ret;
  243. }
  244. EXPORT_SYMBOL_GPL(platform_get_irq);
  245. /**
  246. * platform_irq_count - Count the number of IRQs a platform device uses
  247. * @dev: platform device
  248. *
  249. * Return: Number of IRQs a platform device uses or EPROBE_DEFER
  250. */
  251. int platform_irq_count(struct platform_device *dev)
  252. {
  253. int ret, nr = 0;
  254. while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
  255. nr++;
  256. if (ret == -EPROBE_DEFER)
  257. return ret;
  258. return nr;
  259. }
  260. EXPORT_SYMBOL_GPL(platform_irq_count);
/* devres payload for devm_platform_get_irqs_affinity(): the set of mapped IRQs */
struct irq_affinity_devres {
	unsigned int count;	/* number of valid entries in irq[] */
	unsigned int irq[];	/* flexible array of Linux IRQ numbers */
};
  265. static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
  266. {
  267. struct resource *r;
  268. r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
  269. if (r)
  270. irqresource_disabled(r, 0);
  271. }
  272. static void devm_platform_get_irqs_affinity_release(struct device *dev,
  273. void *res)
  274. {
  275. struct irq_affinity_devres *ptr = res;
  276. int i;
  277. for (i = 0; i < ptr->count; i++) {
  278. irq_dispose_mapping(ptr->irq[i]);
  279. if (has_acpi_companion(dev))
  280. platform_disable_acpi_irq(to_platform_device(dev), i);
  281. }
  282. }
/**
 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 *				     device using an interrupt affinity descriptor
 * @dev: platform device pointer
 * @affd: affinity descriptor
 * @minvec: minimum count of interrupt vectors
 * @maxvec: maximum count of interrupt vectors
 * @irqs: pointer holder for IRQ numbers
 *
 * Gets a set of IRQs for a platform device, and updates IRQ affinity according
 * to the passed affinity descriptor
 *
 * Return: Number of vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	/* How many IRQs does the device actually expose? */
	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	/* Clamp to what the affinity descriptor can spread, then to maxvec. */
	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	/* devres node carries the flexible irq[] array inline. */
	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr->count = nvec;

	/* Resolve every vector's IRQ number up front. */
	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	/* Build the per-vector affinity masks, then apply one per IRQ. */
	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	devres_add(&dev->dev, ptr);

	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
  359. /**
  360. * platform_get_resource_byname - get a resource for a device by name
  361. * @dev: platform device
  362. * @type: resource type
  363. * @name: resource name
  364. */
  365. struct resource *platform_get_resource_byname(struct platform_device *dev,
  366. unsigned int type,
  367. const char *name)
  368. {
  369. u32 i;
  370. for (i = 0; i < dev->num_resources; i++) {
  371. struct resource *r = &dev->resource[i];
  372. if (unlikely(!r->name))
  373. continue;
  374. if (type == resource_type(r) && !strcmp(r->name, name))
  375. return r;
  376. }
  377. return NULL;
  378. }
  379. EXPORT_SYMBOL_GPL(platform_get_resource_byname);
  380. static int __platform_get_irq_byname(struct platform_device *dev,
  381. const char *name)
  382. {
  383. struct resource *r;
  384. int ret;
  385. if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
  386. ret = of_irq_get_byname(dev->dev.of_node, name);
  387. if (ret > 0 || ret == -EPROBE_DEFER)
  388. return ret;
  389. }
  390. r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
  391. if (r) {
  392. if (WARN(!r->start, "0 is an invalid IRQ number\n"))
  393. return -EINVAL;
  394. return r->start;
  395. }
  396. return -ENXIO;
  397. }
  398. /**
  399. * platform_get_irq_byname - get an IRQ for a device by name
  400. * @dev: platform device
  401. * @name: IRQ name
  402. *
  403. * Get an IRQ like platform_get_irq(), but then by name rather then by index.
  404. *
  405. * Return: non-zero IRQ number on success, negative error number on failure.
  406. */
  407. int platform_get_irq_byname(struct platform_device *dev, const char *name)
  408. {
  409. int ret;
  410. ret = __platform_get_irq_byname(dev, name);
  411. if (ret < 0)
  412. return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
  413. name);
  414. return ret;
  415. }
  416. EXPORT_SYMBOL_GPL(platform_get_irq_byname);
  417. /**
  418. * platform_get_irq_byname_optional - get an optional IRQ for a device by name
  419. * @dev: platform device
  420. * @name: IRQ name
  421. *
  422. * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
  423. * does not print an error message if an IRQ can not be obtained.
  424. *
  425. * Return: non-zero IRQ number on success, negative error number on failure.
  426. */
  427. int platform_get_irq_byname_optional(struct platform_device *dev,
  428. const char *name)
  429. {
  430. return __platform_get_irq_byname(dev, name);
  431. }
  432. EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
  433. /**
  434. * platform_add_devices - add a numbers of platform devices
  435. * @devs: array of platform devices to add
  436. * @num: number of platform devices in array
  437. */
  438. int platform_add_devices(struct platform_device **devs, int num)
  439. {
  440. int i, ret = 0;
  441. for (i = 0; i < num; i++) {
  442. ret = platform_device_register(devs[i]);
  443. if (ret) {
  444. while (--i >= 0)
  445. platform_device_unregister(devs[i]);
  446. break;
  447. }
  448. }
  449. return ret;
  450. }
  451. EXPORT_SYMBOL_GPL(platform_add_devices);
/* Allocation unit for platform_device_alloc(): the device plus its name
 * storage in one kzalloc'd chunk, freed together in platform_device_release(). */
struct platform_object {
	struct platform_device pdev;
	char name[];	/* flexible array holding the device's base name */
};
  456. /*
  457. * Set up default DMA mask for platform devices if the they weren't
  458. * previously set by the architecture / DT.
  459. */
  460. static void setup_pdev_dma_masks(struct platform_device *pdev)
  461. {
  462. pdev->dev.dma_parms = &pdev->dma_parms;
  463. if (!pdev->dev.coherent_dma_mask)
  464. pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  465. if (!pdev->dev.dma_mask) {
  466. pdev->platform_dma_mask = DMA_BIT_MASK(32);
  467. pdev->dev.dma_mask = &pdev->platform_dma_mask;
  468. }
  469. };
  470. /**
  471. * platform_device_put - destroy a platform device
  472. * @pdev: platform device to free
  473. *
  474. * Free all memory associated with a platform device. This function must
  475. * _only_ be externally called in error cases. All other usage is a bug.
  476. */
  477. void platform_device_put(struct platform_device *pdev)
  478. {
  479. if (!IS_ERR_OR_NULL(pdev))
  480. put_device(&pdev->dev);
  481. }
  482. EXPORT_SYMBOL_GPL(platform_device_put);
  483. static void platform_device_release(struct device *dev)
  484. {
  485. struct platform_object *pa = container_of(dev, struct platform_object,
  486. pdev.dev);
  487. of_node_put(pa->pdev.dev.of_node);
  488. kfree(pa->pdev.dev.platform_data);
  489. kfree(pa->pdev.mfd_cell);
  490. kfree(pa->pdev.resource);
  491. kfree(pa->pdev.driver_override);
  492. kfree(pa);
  493. }
  494. /**
  495. * platform_device_alloc - create a platform device
  496. * @name: base name of the device we're adding
  497. * @id: instance id
  498. *
  499. * Create a platform device object which can have other objects attached
  500. * to it, and which will have attached objects freed when it is released.
  501. */
  502. struct platform_device *platform_device_alloc(const char *name, int id)
  503. {
  504. struct platform_object *pa;
  505. pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
  506. if (pa) {
  507. strcpy(pa->name, name);
  508. pa->pdev.name = pa->name;
  509. pa->pdev.id = id;
  510. device_initialize(&pa->pdev.dev);
  511. pa->pdev.dev.release = platform_device_release;
  512. setup_pdev_dma_masks(&pa->pdev);
  513. }
  514. return pa ? &pa->pdev : NULL;
  515. }
  516. EXPORT_SYMBOL_GPL(platform_device_alloc);
  517. /**
  518. * platform_device_add_resources - add resources to a platform device
  519. * @pdev: platform device allocated by platform_device_alloc to add resources to
  520. * @res: set of resources that needs to be allocated for the device
  521. * @num: number of resources
  522. *
  523. * Add a copy of the resources to the platform device. The memory
  524. * associated with the resources will be freed when the platform device is
  525. * released.
  526. */
  527. int platform_device_add_resources(struct platform_device *pdev,
  528. const struct resource *res, unsigned int num)
  529. {
  530. struct resource *r = NULL;
  531. if (res) {
  532. r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
  533. if (!r)
  534. return -ENOMEM;
  535. }
  536. kfree(pdev->resource);
  537. pdev->resource = r;
  538. pdev->num_resources = num;
  539. return 0;
  540. }
  541. EXPORT_SYMBOL_GPL(platform_device_add_resources);
  542. /**
  543. * platform_device_add_data - add platform-specific data to a platform device
  544. * @pdev: platform device allocated by platform_device_alloc to add resources to
  545. * @data: platform specific data for this platform device
  546. * @size: size of platform specific data
  547. *
  548. * Add a copy of platform specific data to the platform device's
  549. * platform_data pointer. The memory associated with the platform data
  550. * will be freed when the platform device is released.
  551. */
  552. int platform_device_add_data(struct platform_device *pdev, const void *data,
  553. size_t size)
  554. {
  555. void *d = NULL;
  556. if (data) {
  557. d = kmemdup(data, size, GFP_KERNEL);
  558. if (!d)
  559. return -ENOMEM;
  560. }
  561. kfree(pdev->dev.platform_data);
  562. pdev->dev.platform_data = d;
  563. return 0;
  564. }
  565. EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	/* Devices without an explicit parent hang off the platform root. */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		/* Explicit instance ID: "<name>.<id>". */
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		/* Single-instance device: name only. */
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim each resource into the appropriate resource tree. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			/* No parent given: pick the global tree by type. */
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Unwind: return an auto ID and release resources claimed so far
	 * (i indexes one past the last successfully inserted resource). */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
  641. /**
  642. * platform_device_del - remove a platform-level device
  643. * @pdev: platform device we're removing
  644. *
  645. * Note that this function will also release all memory- and port-based
  646. * resources owned by the device (@dev->resource). This function must
  647. * _only_ be externally called in error cases. All other usage is a bug.
  648. */
  649. void platform_device_del(struct platform_device *pdev)
  650. {
  651. u32 i;
  652. if (!IS_ERR_OR_NULL(pdev)) {
  653. device_del(&pdev->dev);
  654. if (pdev->id_auto) {
  655. ida_free(&platform_devid_ida, pdev->id);
  656. pdev->id = PLATFORM_DEVID_AUTO;
  657. }
  658. for (i = 0; i < pdev->num_resources; i++) {
  659. struct resource *r = &pdev->resource[i];
  660. if (r->parent)
  661. release_resource(r);
  662. }
  663. }
  664. }
  665. EXPORT_SYMBOL_GPL(platform_device_del);
  666. /**
  667. * platform_device_register - add a platform-level device
  668. * @pdev: platform device we're adding
  669. *
  670. * NOTE: _Never_ directly free @pdev after calling this function, even if it
  671. * returned an error! Always use platform_device_put() to give up the
  672. * reference initialised in this function instead.
  673. */
  674. int platform_device_register(struct platform_device *pdev)
  675. {
  676. device_initialize(&pdev->dev);
  677. setup_pdev_dma_masks(pdev);
  678. return platform_device_add(pdev);
  679. }
  680. EXPORT_SYMBOL_GPL(platform_device_register);
  681. /**
  682. * platform_device_unregister - unregister a platform-level device
  683. * @pdev: platform device we're unregistering
  684. *
  685. * Unregistration is done in 2 steps. First we release all resources
  686. * and remove it from the subsystem, then we drop reference count by
  687. * calling platform_device_put().
  688. */
  689. void platform_device_unregister(struct platform_device *pdev)
  690. {
  691. platform_device_del(pdev);
  692. platform_device_put(pdev);
  693. }
  694. EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Take a reference on the OF node (if any) behind the fwnode;
	 * dropped again in platform_device_release(). */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/* Caller-supplied DMA mask overrides the 32-bit default. */
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = device_create_managed_software_node(&pdev->dev,
							  pdevinfo->properties, NULL);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		/* Shared failure path: drop the ACPI link and the device
		 * reference (which frees everything attached above). */
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
  744. /**
  745. * __platform_driver_register - register a driver for platform-level devices
  746. * @drv: platform driver structure
  747. * @owner: owning module/driver
  748. */
  749. int __platform_driver_register(struct platform_driver *drv,
  750. struct module *owner)
  751. {
  752. drv->driver.owner = owner;
  753. drv->driver.bus = &platform_bus_type;
  754. return driver_register(&drv->driver);
  755. }
  756. EXPORT_SYMBOL_GPL(__platform_driver_register);
  757. /**
  758. * platform_driver_unregister - unregister a driver for platform-level devices
  759. * @drv: platform driver structure
  760. */
  761. void platform_driver_unregister(struct platform_driver *drv)
  762. {
  763. driver_unregister(&drv->driver);
  764. }
  765. EXPORT_SYMBOL_GPL(platform_driver_unregister);
  766. static int platform_probe_fail(struct platform_device *pdev)
  767. {
  768. return -ENXIO;
  769. }
/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	/* Async probing cannot work with the bound-device check below. */
	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	/* From here on any probe attempt hits platform_probe_fail(). */
	drv->probe = platform_probe_fail;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	/* Nothing bound: back the registration out again. */
	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
  833. /**
  834. * __platform_create_bundle - register driver and create corresponding device
  835. * @driver: platform driver structure
  836. * @probe: the driver probe routine, probably from an __init section
  837. * @res: set of resources that needs to be allocated for the device
  838. * @n_res: number of resources
  839. * @data: platform specific data for this platform device
  840. * @size: size of platform specific data
  841. * @module: module which will be the owner of the driver
  842. *
  843. * Use this in legacy-style modules that probe hardware directly and
  844. * register a single platform device and corresponding platform driver.
  845. *
  846. * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  847. */
  848. struct platform_device * __init_or_module __platform_create_bundle(
  849. struct platform_driver *driver,
  850. int (*probe)(struct platform_device *),
  851. struct resource *res, unsigned int n_res,
  852. const void *data, size_t size, struct module *module)
  853. {
  854. struct platform_device *pdev;
  855. int error;
  856. pdev = platform_device_alloc(driver->driver.name, -1);
  857. if (!pdev) {
  858. error = -ENOMEM;
  859. goto err_out;
  860. }
  861. error = platform_device_add_resources(pdev, res, n_res);
  862. if (error)
  863. goto err_pdev_put;
  864. error = platform_device_add_data(pdev, data, size);
  865. if (error)
  866. goto err_pdev_put;
  867. error = platform_device_add(pdev);
  868. if (error)
  869. goto err_pdev_put;
  870. error = __platform_driver_probe(driver, probe, module);
  871. if (error)
  872. goto err_pdev_del;
  873. return pdev;
  874. err_pdev_del:
  875. platform_device_del(pdev);
  876. err_pdev_put:
  877. platform_device_put(pdev);
  878. err_out:
  879. return ERR_PTR(error);
  880. }
  881. EXPORT_SYMBOL_GPL(__platform_create_bundle);
  882. /**
  883. * __platform_register_drivers - register an array of platform drivers
  884. * @drivers: an array of drivers to register
  885. * @count: the number of drivers to register
  886. * @owner: module owning the drivers
  887. *
  888. * Registers platform drivers specified by an array. On failure to register a
  889. * driver, all previously registered drivers will be unregistered. Callers of
  890. * this API should use platform_unregister_drivers() to unregister drivers in
  891. * the reverse order.
  892. *
  893. * Returns: 0 on success or a negative error code on failure.
  894. */
  895. int __platform_register_drivers(struct platform_driver * const *drivers,
  896. unsigned int count, struct module *owner)
  897. {
  898. unsigned int i;
  899. int err;
  900. for (i = 0; i < count; i++) {
  901. pr_debug("registering platform driver %ps\n", drivers[i]);
  902. err = __platform_driver_register(drivers[i], owner);
  903. if (err < 0) {
  904. pr_err("failed to register platform driver %ps: %d\n",
  905. drivers[i], err);
  906. goto error;
  907. }
  908. }
  909. return 0;
  910. error:
  911. while (i--) {
  912. pr_debug("unregistering platform driver %ps\n", drivers[i]);
  913. platform_driver_unregister(drivers[i]);
  914. }
  915. return err;
  916. }
  917. EXPORT_SYMBOL_GPL(__platform_register_drivers);
  918. /**
  919. * platform_unregister_drivers - unregister an array of platform drivers
  920. * @drivers: an array of drivers to unregister
  921. * @count: the number of drivers to unregister
  922. *
  923. * Unregisters platform drivers specified by an array. This is typically used
  924. * to complement an earlier call to platform_register_drivers(). Drivers are
  925. * unregistered in the reverse order in which they were registered.
  926. */
  927. void platform_unregister_drivers(struct platform_driver * const *drivers,
  928. unsigned int count)
  929. {
  930. while (count--) {
  931. pr_debug("unregistering platform driver %ps\n", drivers[count]);
  932. platform_driver_unregister(drivers[count]);
  933. }
  934. }
  935. EXPORT_SYMBOL_GPL(platform_unregister_drivers);
  936. static const struct platform_device_id *platform_match_id(
  937. const struct platform_device_id *id,
  938. struct platform_device *pdev)
  939. {
  940. while (id->name[0]) {
  941. if (strcmp(pdev->name, id->name) == 0) {
  942. pdev->id_entry = id;
  943. return id;
  944. }
  945. id++;
  946. }
  947. return NULL;
  948. }
  949. #ifdef CONFIG_PM_SLEEP
  950. static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
  951. {
  952. struct platform_driver *pdrv = to_platform_driver(dev->driver);
  953. struct platform_device *pdev = to_platform_device(dev);
  954. int ret = 0;
  955. if (dev->driver && pdrv->suspend)
  956. ret = pdrv->suspend(pdev, mesg);
  957. return ret;
  958. }
  959. static int platform_legacy_resume(struct device *dev)
  960. {
  961. struct platform_driver *pdrv = to_platform_driver(dev->driver);
  962. struct platform_device *pdev = to_platform_device(dev);
  963. int ret = 0;
  964. if (dev->driver && pdrv->resume)
  965. ret = pdrv->resume(pdev);
  966. return ret;
  967. }
  968. #endif /* CONFIG_PM_SLEEP */
  969. #ifdef CONFIG_SUSPEND
  970. int platform_pm_suspend(struct device *dev)
  971. {
  972. struct device_driver *drv = dev->driver;
  973. int ret = 0;
  974. if (!drv)
  975. return 0;
  976. if (drv->pm) {
  977. if (drv->pm->suspend)
  978. ret = drv->pm->suspend(dev);
  979. } else {
  980. ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
  981. }
  982. return ret;
  983. }
  984. int platform_pm_resume(struct device *dev)
  985. {
  986. struct device_driver *drv = dev->driver;
  987. int ret = 0;
  988. if (!drv)
  989. return 0;
  990. if (drv->pm) {
  991. if (drv->pm->resume)
  992. ret = drv->pm->resume(dev);
  993. } else {
  994. ret = platform_legacy_resume(dev);
  995. }
  996. return ret;
  997. }
  998. #endif /* CONFIG_SUSPEND */
  999. #ifdef CONFIG_HIBERNATE_CALLBACKS
  1000. int platform_pm_freeze(struct device *dev)
  1001. {
  1002. struct device_driver *drv = dev->driver;
  1003. int ret = 0;
  1004. if (!drv)
  1005. return 0;
  1006. if (drv->pm) {
  1007. if (drv->pm->freeze)
  1008. ret = drv->pm->freeze(dev);
  1009. } else {
  1010. ret = platform_legacy_suspend(dev, PMSG_FREEZE);
  1011. }
  1012. return ret;
  1013. }
  1014. int platform_pm_thaw(struct device *dev)
  1015. {
  1016. struct device_driver *drv = dev->driver;
  1017. int ret = 0;
  1018. if (!drv)
  1019. return 0;
  1020. if (drv->pm) {
  1021. if (drv->pm->thaw)
  1022. ret = drv->pm->thaw(dev);
  1023. } else {
  1024. ret = platform_legacy_resume(dev);
  1025. }
  1026. return ret;
  1027. }
  1028. int platform_pm_poweroff(struct device *dev)
  1029. {
  1030. struct device_driver *drv = dev->driver;
  1031. int ret = 0;
  1032. if (!drv)
  1033. return 0;
  1034. if (drv->pm) {
  1035. if (drv->pm->poweroff)
  1036. ret = drv->pm->poweroff(dev);
  1037. } else {
  1038. ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
  1039. }
  1040. return ret;
  1041. }
  1042. int platform_pm_restore(struct device *dev)
  1043. {
  1044. struct device_driver *drv = dev->driver;
  1045. int ret = 0;
  1046. if (!drv)
  1047. return 0;
  1048. if (drv->pm) {
  1049. if (drv->pm->restore)
  1050. ret = drv->pm->restore(dev);
  1051. } else {
  1052. ret = platform_legacy_resume(dev);
  1053. }
  1054. return ret;
  1055. }
  1056. #endif /* CONFIG_HIBERNATE_CALLBACKS */
/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running: "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running: "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	/* Try an OF-style modalias first ... */
	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	/* ... then an ACPI-style one ... */
	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	/* ... and finally fall back to the plain platform device name. */
	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);
/* sysfs: report the device's NUMA node (hidden when the node is unset,
 * see platform_dev_attrs_visible()). */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
  1083. static ssize_t driver_override_show(struct device *dev,
  1084. struct device_attribute *attr, char *buf)
  1085. {
  1086. struct platform_device *pdev = to_platform_device(dev);
  1087. ssize_t len;
  1088. device_lock(dev);
  1089. len = sysfs_emit(buf, "%s\n", pdev->driver_override);
  1090. device_unlock(dev);
  1091. return len;
  1092. }
  1093. static ssize_t driver_override_store(struct device *dev,
  1094. struct device_attribute *attr,
  1095. const char *buf, size_t count)
  1096. {
  1097. struct platform_device *pdev = to_platform_device(dev);
  1098. int ret;
  1099. ret = driver_set_override(dev, &pdev->driver_override, buf, count);
  1100. if (ret)
  1101. return ret;
  1102. return count;
  1103. }
  1104. static DEVICE_ATTR_RW(driver_override);
/* Default sysfs attributes present on every platform device. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Hide the numa_node attribute on devices with no NUMA node assigned;
 * every other attribute keeps its default mode.
 */
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);

	if (a == &dev_attr_numa_node.attr &&
			dev_to_node(dev) == NUMA_NO_NODE)
		return 0;

	return a->mode;
}

static const struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);
  1125. /**
  1126. * platform_match - bind platform device to platform driver.
  1127. * @dev: device.
  1128. * @drv: driver.
  1129. *
  1130. * Platform device IDs are assumed to be encoded like this:
  1131. * "<name><instance>", where <name> is a short description of the type of
  1132. * device, like "pci" or "floppy", and <instance> is the enumerated
  1133. * instance of the device, like '0' or '42'. Driver IDs are simply
  1134. * "<name>". So, extract the <name> from the platform_device structure,
  1135. * and compare it against the name of the driver. Return whether they match
  1136. * or not.
  1137. */
  1138. static int platform_match(struct device *dev, struct device_driver *drv)
  1139. {
  1140. struct platform_device *pdev = to_platform_device(dev);
  1141. struct platform_driver *pdrv = to_platform_driver(drv);
  1142. /* When driver_override is set, only bind to the matching driver */
  1143. if (pdev->driver_override)
  1144. return !strcmp(pdev->driver_override, drv->name);
  1145. /* Attempt an OF style match first */
  1146. if (of_driver_match_device(dev, drv))
  1147. return 1;
  1148. /* Then try ACPI style match */
  1149. if (acpi_driver_match_device(dev, drv))
  1150. return 1;
  1151. /* Then try to match against the id table */
  1152. if (pdrv->id_table)
  1153. return platform_match_id(pdrv->id_table, pdev) != NULL;
  1154. /* fall-back to driver name match */
  1155. return (strcmp(pdev->name, drv->name) == 0);
  1156. }
  1157. static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
  1158. {
  1159. struct platform_device *pdev = to_platform_device(dev);
  1160. int rc;
  1161. /* Some devices have extra OF data and an OF-style MODALIAS */
  1162. rc = of_device_uevent_modalias(dev, env);
  1163. if (rc != -ENODEV)
  1164. return rc;
  1165. rc = acpi_device_uevent_modalias(dev, env);
  1166. if (rc != -ENODEV)
  1167. return rc;
  1168. add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
  1169. pdev->name);
  1170. return 0;
  1171. }
/*
 * Bus ->probe callback: set up clock defaults and PM domains around the
 * platform driver's own probe routine.
 */
static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even prepare
	 * clocks and PM domains for these to match the traditional behaviour.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			/* Probe failed: undo the PM domain attach. */
			dev_pm_domain_detach(_dev, true);
	}

out:
	/*
	 * Deferral is forbidden for platform_driver_probe() style drivers;
	 * convert -EPROBE_DEFER (from attach or probe) into a hard failure.
	 */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}
  1204. static void platform_remove(struct device *_dev)
  1205. {
  1206. struct platform_driver *drv = to_platform_driver(_dev->driver);
  1207. struct platform_device *dev = to_platform_device(_dev);
  1208. if (drv->remove_new) {
  1209. drv->remove_new(dev);
  1210. } else if (drv->remove) {
  1211. int ret = drv->remove(dev);
  1212. if (ret)
  1213. dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
  1214. }
  1215. dev_pm_domain_detach(_dev, true);
  1216. }
  1217. static void platform_shutdown(struct device *_dev)
  1218. {
  1219. struct platform_device *dev = to_platform_device(_dev);
  1220. struct platform_driver *drv;
  1221. if (!_dev->driver)
  1222. return;
  1223. drv = to_platform_driver(_dev->driver);
  1224. if (drv->shutdown)
  1225. drv->shutdown(dev);
  1226. }
/*
 * Bus ->dma_configure callback: configure DMA from the firmware (OF or
 * ACPI) description, then claim the default IOMMU domain unless the
 * driver manages DMA itself.
 */
static int platform_dma_configure(struct device *dev)
{
	struct platform_driver *drv = to_platform_driver(dev->driver);
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	if (!ret && !drv->driver_managed_dma) {
		ret = iommu_device_use_default_domain(dev);
		if (ret)
			/* Claiming the domain failed: undo the DMA setup. */
			arch_teardown_dma_ops(dev);
	}

	return ret;
}
  1245. static void platform_dma_cleanup(struct device *dev)
  1246. {
  1247. struct platform_driver *drv = to_platform_driver(dev->driver);
  1248. if (!drv->driver_managed_dma)
  1249. iommu_device_unuse_default_domain(dev);
  1250. }
/* Generic runtime PM plus whatever sleep ops the PM Kconfig selects. */
static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};

/* The platform bus itself: matching, uevents, probe/remove, DMA setup. */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.dma_cleanup	= platform_dma_cleanup,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
/*
 * Adapter for bus_find_device(), which passes its match data as
 * const void *; cast it back to the driver and reuse platform_match().
 */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 *
 * Returns the first matching device found on the platform bus (with a
 * reference held, per bus_find_device()), or NULL if none matches.
 */
struct device *platform_find_device_by_driver(struct device *start,
		const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			__platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
/* Overridable (__weak) hook run by platform_bus_init(); default is a no-op. */
void __weak __init early_platform_cleanup(void) { }
  1286. int __init platform_bus_init(void)
  1287. {
  1288. int error;
  1289. early_platform_cleanup();
  1290. error = device_register(&platform_bus);
  1291. if (error) {
  1292. put_device(&platform_bus);
  1293. return error;
  1294. }
  1295. error = bus_register(&platform_bus_type);
  1296. if (error)
  1297. device_unregister(&platform_bus);
  1298. of_platform_register_reconfig_notifier();
  1299. return error;
  1300. }