// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mfd/ucb1x00-core.c
 *
 *  Copyright (C) 2001 Russell King, All Rights Reserved.
 *
 *  The UCB1x00 core driver provides basic services for handling IO,
 *  the ADC, interrupts, and accessing registers.  It is designed
 *  such that everything goes through this layer, thereby providing
 *  a consistent locking methodology, as well as allowing the drivers
 *  to be used on other non-MCP-enabled hardware platforms.
 *
 *  Note that all locks are private to this file.  Nothing else may
 *  touch them.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/pm.h>
#include <linux/gpio/driver.h>

static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);

/**
 * ucb1x00_io_set_dir - set IO direction
 * @ucb: UCB1x00 structure describing chip
 * @in:  bitfield of IO pins to be set as inputs
 * @out: bitfield of IO pins to be set as outputs
 *
 * Set the IO direction of the ten general purpose IO pins on
 * the UCB1x00 chip.  The @in bitfield has priority over the
 * @out bitfield, in that if you specify a pin as both input
 * and output, it will end up as an input.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
{
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        ucb->io_dir |= out;
        ucb->io_dir &= ~in;

        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_write - set or clear IO outputs
 * @ucb:   UCB1x00 structure describing chip
 * @set:   bitfield of IO pins to set to logic '1'
 * @clear: bitfield of IO pins to set to logic '0'
 *
 * Set the IO output state of the specified IO pins.  The value
 * is retained if the pins are subsequently configured as inputs.
 * The @clear bitfield has priority over the @set bitfield -
 * outputs will be cleared.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
{
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        ucb->io_out |= set;
        ucb->io_out &= ~clear;

        ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_read - read the current state of the IO pins
 * @ucb: UCB1x00 structure describing chip
 *
 * Return a bitfield describing the logic state of the ten
 * general purpose IO pins.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function does not take any mutexes or spinlocks.
 */
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
{
        return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}

static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        if (value)
                ucb->io_out |= 1 << offset;
        else
                ucb->io_out &= ~(1 << offset);

        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        ucb1x00_disable(ucb);
        spin_unlock_irqrestore(&ucb->io_lock, flags);
}

static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned val;

        ucb1x00_enable(ucb);
        val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
        ucb1x00_disable(ucb);

        return !!(val & (1 << offset));
}

static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned long flags;

        spin_lock_irqsave(&ucb->io_lock, flags);
        ucb->io_dir &= ~(1 << offset);
        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        ucb1x00_disable(ucb);
        spin_unlock_irqrestore(&ucb->io_lock, flags);

        return 0;
}

static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                         int value)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);
        unsigned long flags;
        unsigned old, mask = 1 << offset;

        spin_lock_irqsave(&ucb->io_lock, flags);
        old = ucb->io_out;
        if (value)
                ucb->io_out |= mask;
        else
                ucb->io_out &= ~mask;

        ucb1x00_enable(ucb);
        if (old != ucb->io_out)
                ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);

        if (!(ucb->io_dir & mask)) {
                ucb->io_dir |= mask;
                ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        }
        ucb1x00_disable(ucb);
        spin_unlock_irqrestore(&ucb->io_lock, flags);

        return 0;
}

static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
{
        struct ucb1x00 *ucb = gpiochip_get_data(chip);

        return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
}

/*
 * UCB1300 data sheet says we must:
 *  1. enable ADC        => 5us (including reference startup time)
 *  2. select input      => 51*tsibclk  => 4.3us
 *  3. start conversion  => 102*tsibclk => 8.5us
 * (tsibclk = 1/11981000)
 * Period between SIB 128-bit frames = 10.7us
 */

/**
 * ucb1x00_adc_enable - enable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
 * Any code wishing to use the ADC converter must call this
 * function prior to using it.
 *
 * This function takes the ADC mutex to prevent two or more
 * concurrent uses, and therefore may sleep.  As a result, it
 * can only be called from process context, not interrupt
 * context.
 *
 * You should release the ADC as soon as possible using
 * ucb1x00_adc_disable.
 */
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
        mutex_lock(&ucb->adc_mutex);

        ucb->adc_cr |= UCB_ADC_ENA;

        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
}

/**
 * ucb1x00_adc_read - read the specified ADC channel
 * @ucb: UCB1x00 structure describing chip
 * @adc_channel: ADC channel mask
 * @sync: wait for synchronisation pulse.
 *
 * Start an ADC conversion and wait for the result.  Note that
 * synchronised ADC conversions (via the ADCSYNC pin) must wait
 * until the trigger is asserted and the conversion is finished.
 *
 * This function currently spins waiting for the conversion to
 * complete (2 frames max without sync).
 *
 * If called for a synchronised ADC conversion, it may sleep
 * with the ADC mutex held.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
        unsigned int val;

        if (sync)
                adc_channel |= UCB_ADC_SYNC_ENA;

        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

        for (;;) {
                val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
                if (val & UCB_ADC_DAT_VAL)
                        break;
                /* yield to other processes */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);
        }

        return UCB_ADC_DAT(val);
}

/**
 * ucb1x00_adc_disable - disable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Disable the ADC converter and release the ADC mutex.
 */
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
        ucb->adc_cr &= ~UCB_ADC_ENA;
        ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
        ucb1x00_disable(ucb);

        mutex_unlock(&ucb->adc_mutex);
}
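
/*
 * A minimal usage sketch of the enable/read/disable ADC sequence above,
 * for a hypothetical client that already holds a struct ucb1x00 pointer,
 * assuming the UCB_ADC_INP_AD0 and UCB_NOSYNC constants from
 * <linux/mfd/ucb1x00.h> describe the wanted input and (no) sync mode:
 *
 *	unsigned int val;
 *
 *	ucb1x00_adc_enable(ucb);		// takes the ADC mutex, may sleep
 *	val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);
 *	ucb1x00_adc_disable(ucb);		// releases the ADC mutex
 */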

/*
 * UCB1x00 Interrupt handling.
 *
 * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
 * Since we need to read an internal register, we must re-enable
 * SIBCLK to talk to the chip.  We leave the clock running until
 * we have finished processing all interrupts from the chip.
 */
static void ucb1x00_irq(struct irq_desc *desc)
{
        struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
        unsigned int isr, i;

        ucb1x00_enable(ucb);
        isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

        for (i = 0; i < 16 && isr; i++, isr >>= 1)
                if (isr & 1)
                        generic_handle_irq(ucb->irq_base + i);
        ucb1x00_disable(ucb);
}

static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
        ucb1x00_enable(ucb);
        if (ucb->irq_ris_enbl & mask)
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_mask);
        if (ucb->irq_fal_enbl & mask)
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_mask);
        ucb1x00_disable(ucb);
}

static void ucb1x00_irq_noop(struct irq_data *data)
{
}

static void ucb1x00_irq_mask(struct irq_data *data)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        raw_spin_lock(&ucb->irq_lock);
        ucb->irq_mask &= ~mask;
        ucb1x00_irq_update(ucb, mask);
        raw_spin_unlock(&ucb->irq_lock);
}

static void ucb1x00_irq_unmask(struct irq_data *data)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        raw_spin_lock(&ucb->irq_lock);
        ucb->irq_mask |= mask;
        ucb1x00_irq_update(ucb, mask);
        raw_spin_unlock(&ucb->irq_lock);
}

static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        raw_spin_lock(&ucb->irq_lock);
        if (type & IRQ_TYPE_EDGE_RISING)
                ucb->irq_ris_enbl |= mask;
        else
                ucb->irq_ris_enbl &= ~mask;

        if (type & IRQ_TYPE_EDGE_FALLING)
                ucb->irq_fal_enbl |= mask;
        else
                ucb->irq_fal_enbl &= ~mask;
        if (ucb->irq_mask & mask) {
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_mask);
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_mask);
        }
        raw_spin_unlock(&ucb->irq_lock);

        return 0;
}

static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
        struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
        unsigned mask = 1 << (data->irq - ucb->irq_base);

        if (!pdata || !pdata->can_wakeup)
                return -EINVAL;

        raw_spin_lock(&ucb->irq_lock);
        if (on)
                ucb->irq_wake |= mask;
        else
                ucb->irq_wake &= ~mask;
        raw_spin_unlock(&ucb->irq_lock);

        return 0;
}

static struct irq_chip ucb1x00_irqchip = {
        .name         = "ucb1x00",
        .irq_ack      = ucb1x00_irq_noop,
        .irq_mask     = ucb1x00_irq_mask,
        .irq_unmask   = ucb1x00_irq_unmask,
        .irq_set_type = ucb1x00_irq_set_type,
        .irq_set_wake = ucb1x00_irq_set_wake,
};
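
/*
 * Sub-drivers see the sixteen chip interrupts as ordinary Linux IRQs
 * starting at ucb->irq_base.  A hedged sketch of how a client might hook
 * one of them, assuming the UCB_IRQ_TSPX offset from <linux/mfd/ucb1x00.h>
 * and a hypothetical handler my_tspx_isr():
 *
 *	ret = request_irq(ucb->irq_base + UCB_IRQ_TSPX, my_tspx_isr,
 *			  IRQF_TRIGGER_FALLING, "my-tspx", priv);
 */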

static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
        struct ucb1x00_dev *dev;
        int ret;

        dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->ucb = ucb;
        dev->drv = drv;

        ret = drv->add(dev);
        if (ret) {
                kfree(dev);
                return ret;
        }

        list_add_tail(&dev->dev_node, &ucb->devs);
        list_add_tail(&dev->drv_node, &drv->devs);

        return ret;
}

static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
{
        dev->drv->remove(dev);
        list_del(&dev->dev_node);
        list_del(&dev->drv_node);
        kfree(dev);
}

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.  For reference, the expected
 * IRQ mappings are:
 *
 *  Machine          Default IRQ
 *  adsbitsy         IRQ_GPCIN4
 *  cerf             IRQ_GPIO_UCB1200_IRQ
 *  flexanet         IRQ_GPIO_GUI
 *  freebird         IRQ_GPIO_FREEBIRD_UCB1300_IRQ
 *  graphicsclient   ADS_EXT_IRQ(8)
 *  graphicsmaster   ADS_EXT_IRQ(8)
 *  lart             LART_IRQ_UCB1200
 *  omnimeter        IRQ_GPIO23
 *  pfs168           IRQ_GPIO_UCB1300_IRQ
 *  simpad           IRQ_GPIO_UCB1300_IRQ
 *  shannon          SHANNON_IRQ_GPIO_IRQ_CODEC
 *  yopy             IRQ_GPIO_UCB1200_IRQ
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
        unsigned long mask;

        mask = probe_irq_on();

        /*
         * Enable the ADC interrupt.
         */
        ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
        ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

        /*
         * Cause an ADC interrupt.
         */
        ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

        /*
         * Wait for the conversion to complete.
         */
        while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
        ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);

        /*
         * Disable and clear interrupt.
         */
        ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
        ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
        ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

        /*
         * Read triggered interrupt.
         */
        return probe_irq_off(mask);
}

static void ucb1x00_release(struct device *dev)
{
        struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);

        kfree(ucb);
}

static struct class ucb1x00_class = {
        .name        = "ucb1x00",
        .dev_release = ucb1x00_release,
};

static int ucb1x00_probe(struct mcp *mcp)
{
        struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
        struct ucb1x00_driver *drv;
        struct ucb1x00 *ucb;
        unsigned id, i, irq_base;
        int ret = -ENODEV;

        /* Tell the platform to deassert the UCB1x00 reset */
        if (pdata && pdata->reset)
                pdata->reset(UCB_RST_PROBE);

        mcp_enable(mcp);
        id = mcp_reg_read(mcp, UCB_ID);
        mcp_disable(mcp);

        if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
                printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
                goto out;
        }

        ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
        ret = -ENOMEM;
        if (!ucb)
                goto out;

        device_initialize(&ucb->dev);
        ucb->dev.class = &ucb1x00_class;
        ucb->dev.parent = &mcp->attached_device;
        dev_set_name(&ucb->dev, "ucb1x00");

        raw_spin_lock_init(&ucb->irq_lock);
        spin_lock_init(&ucb->io_lock);
        mutex_init(&ucb->adc_mutex);

        ucb->id = id;
        ucb->mcp = mcp;

        ret = device_add(&ucb->dev);
        if (ret)
                goto err_dev_add;

        ucb1x00_enable(ucb);
        ucb->irq = ucb1x00_detect_irq(ucb);
        ucb1x00_disable(ucb);
        if (!ucb->irq) {
                dev_err(&ucb->dev, "IRQ probe failed\n");
                ret = -ENODEV;
                goto err_no_irq;
        }

        ucb->gpio.base = -1;
        irq_base = pdata ? pdata->irq_base : 0;
        ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
        if (ucb->irq_base < 0) {
                dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
                        ucb->irq_base);
                ret = ucb->irq_base;
                goto err_irq_alloc;
        }

        for (i = 0; i < 16; i++) {
                unsigned irq = ucb->irq_base + i;

                irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
                irq_set_chip_data(irq, ucb);
                irq_clear_status_flags(irq, IRQ_NOREQUEST);
        }

        irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
        irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);

        if (pdata && pdata->gpio_base) {
                ucb->gpio.label = dev_name(&ucb->dev);
                ucb->gpio.parent = &ucb->dev;
                ucb->gpio.owner = THIS_MODULE;
                ucb->gpio.base = pdata->gpio_base;
                ucb->gpio.ngpio = 10;
                ucb->gpio.set = ucb1x00_gpio_set;
                ucb->gpio.get = ucb1x00_gpio_get;
                ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
                ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
                ucb->gpio.to_irq = ucb1x00_to_irq;
                ret = gpiochip_add_data(&ucb->gpio, ucb);
                if (ret)
                        goto err_gpio_add;
        } else
                dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");

        mcp_set_drvdata(mcp, ucb);

        if (pdata)
                device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);

        INIT_LIST_HEAD(&ucb->devs);
        mutex_lock(&ucb1x00_mutex);
        list_add_tail(&ucb->node, &ucb1x00_devices);
        list_for_each_entry(drv, &ucb1x00_drivers, node) {
                ucb1x00_add_dev(ucb, drv);
        }
        mutex_unlock(&ucb1x00_mutex);

        return ret;

err_gpio_add:
        irq_set_chained_handler(ucb->irq, NULL);
err_irq_alloc:
        if (ucb->irq_base > 0)
                irq_free_descs(ucb->irq_base, 16);
err_no_irq:
        device_del(&ucb->dev);
err_dev_add:
        put_device(&ucb->dev);
out:
        if (pdata && pdata->reset)
                pdata->reset(UCB_RST_PROBE_FAIL);
        return ret;
}

static void ucb1x00_remove(struct mcp *mcp)
{
        struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
        struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
        struct list_head *l, *n;

        mutex_lock(&ucb1x00_mutex);
        list_del(&ucb->node);
        list_for_each_safe(l, n, &ucb->devs) {
                struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
                ucb1x00_remove_dev(dev);
        }
        mutex_unlock(&ucb1x00_mutex);

        if (ucb->gpio.base != -1)
                gpiochip_remove(&ucb->gpio);

        irq_set_chained_handler(ucb->irq, NULL);
        irq_free_descs(ucb->irq_base, 16);
        device_unregister(&ucb->dev);

        if (pdata && pdata->reset)
                pdata->reset(UCB_RST_REMOVE);
}

int ucb1x00_register_driver(struct ucb1x00_driver *drv)
{
        struct ucb1x00 *ucb;

        INIT_LIST_HEAD(&drv->devs);
        mutex_lock(&ucb1x00_mutex);
        list_add_tail(&drv->node, &ucb1x00_drivers);
        list_for_each_entry(ucb, &ucb1x00_devices, node) {
                ucb1x00_add_dev(ucb, drv);
        }
        mutex_unlock(&ucb1x00_mutex);

        return 0;
}

void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
{
        struct list_head *n, *l;

        mutex_lock(&ucb1x00_mutex);
        list_del(&drv->node);
        list_for_each_safe(l, n, &drv->devs) {
                struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
                ucb1x00_remove_dev(dev);
        }
        mutex_unlock(&ucb1x00_mutex);
}
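
/*
 * A hedged sketch of the sub-driver registration pattern served by the two
 * functions above, using a hypothetical client driver "my_ucb_client" whose
 * add()/remove() callbacks do nothing interesting:
 *
 *	static int my_add(struct ucb1x00_dev *dev) { return 0; }
 *	static void my_remove(struct ucb1x00_dev *dev) { }
 *
 *	static struct ucb1x00_driver my_ucb_client = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	// typically from the client's module_init()/module_exit():
 *	ucb1x00_register_driver(&my_ucb_client);
 *	ucb1x00_unregister_driver(&my_ucb_client);
 */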

#ifdef CONFIG_PM_SLEEP
static int ucb1x00_suspend(struct device *dev)
{
        struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
        struct ucb1x00 *ucb = dev_get_drvdata(dev);
        struct ucb1x00_dev *udev;

        mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(udev, &ucb->devs, dev_node) {
                if (udev->drv->suspend)
                        udev->drv->suspend(udev);
        }
        mutex_unlock(&ucb1x00_mutex);

        if (ucb->irq_wake) {
                unsigned long flags;

                raw_spin_lock_irqsave(&ucb->irq_lock, flags);
                ucb1x00_enable(ucb);
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_wake);
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_wake);
                ucb1x00_disable(ucb);
                raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

                enable_irq_wake(ucb->irq);
        } else if (pdata && pdata->reset)
                pdata->reset(UCB_RST_SUSPEND);

        return 0;
}

static int ucb1x00_resume(struct device *dev)
{
        struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
        struct ucb1x00 *ucb = dev_get_drvdata(dev);
        struct ucb1x00_dev *udev;

        if (!ucb->irq_wake && pdata && pdata->reset)
                pdata->reset(UCB_RST_RESUME);

        ucb1x00_enable(ucb);
        ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);

        if (ucb->irq_wake) {
                unsigned long flags;

                raw_spin_lock_irqsave(&ucb->irq_lock, flags);
                ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
                                  ucb->irq_mask);
                ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
                                  ucb->irq_mask);
                raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

                disable_irq_wake(ucb->irq);
        }
        ucb1x00_disable(ucb);

        mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(udev, &ucb->devs, dev_node) {
                if (udev->drv->resume)
                        udev->drv->resume(udev);
        }
        mutex_unlock(&ucb1x00_mutex);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops, ucb1x00_suspend, ucb1x00_resume);

static struct mcp_driver ucb1x00_driver = {
        .drv            = {
                .name   = "ucb1x00",
                .owner  = THIS_MODULE,
                .pm     = &ucb1x00_pm_ops,
        },
        .probe          = ucb1x00_probe,
        .remove         = ucb1x00_remove,
};

static int __init ucb1x00_init(void)
{
        int ret = class_register(&ucb1x00_class);
        if (ret == 0) {
                ret = mcp_driver_register(&ucb1x00_driver);
                if (ret)
                        class_unregister(&ucb1x00_class);
        }
        return ret;
}

static void __exit ucb1x00_exit(void)
{
        mcp_driver_unregister(&ucb1x00_driver);
        class_unregister(&ucb1x00_class);
}

module_init(ucb1x00_init);
module_exit(ucb1x00_exit);

EXPORT_SYMBOL(ucb1x00_io_set_dir);
EXPORT_SYMBOL(ucb1x00_io_write);
EXPORT_SYMBOL(ucb1x00_io_read);

EXPORT_SYMBOL(ucb1x00_adc_enable);
EXPORT_SYMBOL(ucb1x00_adc_read);
EXPORT_SYMBOL(ucb1x00_adc_disable);

EXPORT_SYMBOL(ucb1x00_register_driver);
EXPORT_SYMBOL(ucb1x00_unregister_driver);

MODULE_ALIAS("mcp:ucb1x00");
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("UCB1x00 core driver");
MODULE_LICENSE("GPL");