// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        unsigned int mask_base;
        unsigned int unmask_base;

        void *status_reg_buf;
        unsigned int *main_status_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;
        unsigned int *type_buf;
        unsigned int *type_buf_def;
        unsigned int **virt_buf;
        unsigned int **config_buf;

        unsigned int irq_reg_stride;

        unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
                                    unsigned int base, int index);

        unsigned int clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                     int irq)
{
        return &data->chip->irqs[irq];
}

static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
        struct regmap *map = data->map;

        /*
         * While it's possible that a user-defined ->get_irq_reg() callback
         * might be linear enough to support bulk reads, most of the time it
         * won't be. Therefore only allow them if the default callback is
         * being used.
         */
        return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
               data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
               !map->use_single_read;
}

static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, j, ret;
        u32 reg;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        if (d->clear_status) {
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = d->get_irq_reg(d, d->chip->status_base, i);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware. We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (d->mask_base) {
                        reg = d->get_irq_reg(d, d->mask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], d->mask_buf[i]);
                        if (ret)
                                dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                        reg);
                }

                if (d->unmask_base) {
                        reg = d->get_irq_reg(d, d->unmask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret)
                                dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                        reg);
                }

                reg = d->get_irq_reg(d, d->chip->wake_base, i);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;

                /*
                 * Ack all the masked interrupts unconditionally: a masked
                 * interrupt that hasn't been acked will be ignored in the
                 * irq handler and may then cause an irq storm.
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = d->get_irq_reg(d, d->chip->ack_base, i);

                        /* Some chips ack by writing 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Don't update the type bits if we're using mask bits for irq type. */
        if (!d->chip->type_in_mask) {
                for (i = 0; i < d->chip->num_type_reg; i++) {
                        if (!d->type_buf_def[i])
                                continue;
                        reg = d->get_irq_reg(d, d->chip->type_base, i);
                        if (d->chip->type_invert)
                                ret = regmap_update_bits(d->map, reg,
                                        d->type_buf_def[i], ~d->type_buf[i]);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                        d->type_buf_def[i], d->type_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to sync type in %x\n",
                                        reg);
                }
        }

        if (d->chip->num_virt_regs) {
                for (i = 0; i < d->chip->num_virt_regs; i++) {
                        for (j = 0; j < d->chip->num_regs; j++) {
                                reg = d->get_irq_reg(d, d->chip->virt_reg_base[i],
                                                     j);
                                ret = regmap_write(map, reg, d->virt_buf[i][j]);
                                if (ret != 0)
                                        dev_err(d->map->dev,
                                                "Failed to write virt 0x%x: %d\n",
                                                reg, ret);
                        }
                }
        }

        for (i = 0; i < d->chip->num_config_bases; i++) {
                for (j = 0; j < d->chip->num_config_regs; j++) {
                        reg = d->get_irq_reg(d, d->chip->config_base[i], j);
                        ret = regmap_write(map, reg, d->config_buf[i][j]);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to write config %x: %d\n",
                                        reg, ret);
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        unsigned int reg = irq_data->reg_offset / map->reg_stride;
        unsigned int mask;

        /*
         * The type_in_mask flag means that the underlying hardware uses
         * separate mask bits for each interrupt trigger type, but we want
         * to have a single logical interrupt with a configurable type.
         *
         * If the interrupt we're enabling defines any supported types
         * then instead of using the regular mask bits for this interrupt,
         * use the value previously written to the type buffer at the
         * corresponding offset in regmap_irq_set_type().
         */
        if (d->chip->type_in_mask && irq_data->type.types_supported)
                mask = d->type_buf[reg] & irq_data->mask;
        else
                mask = irq_data->mask;

        if (d->chip->clear_on_unmask)
                d->clear_status = true;

        d->mask_buf[reg] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        int reg, ret;
        const struct regmap_irq_type *t = &irq_data->type;

        if ((t->types_supported & type) != type)
                return 0;

        reg = t->type_reg_offset / map->reg_stride;

        if (t->type_reg_mask)
                d->type_buf[reg] &= ~t->type_reg_mask;
        else
                d->type_buf[reg] &= ~(t->type_falling_val |
                                      t->type_rising_val |
                                      t->type_level_low_val |
                                      t->type_level_high_val);

        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                d->type_buf[reg] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                d->type_buf[reg] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                d->type_buf[reg] |= (t->type_falling_val |
                                     t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                d->type_buf[reg] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                d->type_buf[reg] |= t->type_level_low_val;
                break;

        default:
                return -EINVAL;
        }

        if (d->chip->set_type_virt) {
                ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
                                             reg);
                if (ret)
                        return ret;
        }

        if (d->chip->set_type_config) {
                ret = d->chip->set_type_config(d->config_buf, type,
                                               irq_data, reg);
                if (ret)
                        return ret;
        }

        return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (on) {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                &= ~irq_data->mask;
                d->wake_count++;
        } else {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}

static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};

static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                                    unsigned int b)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        struct regmap_irq_sub_irq_map *subreg;
        unsigned int reg;
        int i, ret = 0;

        if (!chip->sub_reg_offsets) {
                reg = data->get_irq_reg(data, chip->status_base, b);
                ret = regmap_read(map, reg, &data->status_buf[b]);
        } else {
                /*
                 * Note we can't use ->get_irq_reg() here because the offsets
                 * in 'subreg' are *not* interchangeable with indices.
                 */
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];
                        unsigned int index = offset / map->reg_stride;

                        if (chip->not_fixed_stride)
                                ret = regmap_read(map,
                                                  chip->status_base + offset,
                                                  &data->status_buf[b]);
                        else
                                ret = regmap_read(map,
                                                  chip->status_base + offset,
                                                  &data->status_buf[index]);
                        if (ret)
                                break;
                }
        }
        return ret;
}

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }

        /*
         * If the chip has a 'main status register', read only the status
         * registers with active IRQs. Otherwise read in all the statuses,
         * using a single bulk read if possible in order to reduce the I/O
         * overheads.
         */
        if (chip->num_main_regs) {
                unsigned int max_main_bits;
                unsigned long size;

                size = chip->num_regs * sizeof(unsigned int);

                max_main_bits = (chip->num_main_status_bits) ?
                                chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset(data->status_buf, 0, size);

                /*
                 * We could support bulk reads for the main status registers,
                 * but devices are unlikely to have very many of them, so
                 * only support single reads for the sake of simplicity and
                 * add bulk reads only if needed.
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        /*
                         * For not_fixed_stride, don't use ->get_irq_reg().
                         * It would produce an incorrect result.
                         */
                        if (data->chip->not_fixed_stride)
                                reg = chip->main_status +
                                        i * map->reg_stride * data->irq_reg_stride;
                        else
                                reg = data->get_irq_reg(data,
                                                        chip->main_status, i);

                        ret = regmap_read(map, reg, &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }
                }
        } else if (regmap_irq_can_bulk_read_status(data)) {
                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }
        } else {
                for (i = 0; i < data->chip->num_regs; i++) {
                        unsigned int reg = data->get_irq_reg(data,
                                        data->chip->status_base, i);
                        ret = regmap_read(map, reg, &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        if (chip->status_invert)
                for (i = 0; i < data->chip->num_regs; i++)
                        data->status_buf[i] = ~data->status_buf[i];

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt. We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = data->get_irq_reg(data, data->chip->ack_base, i);

                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                   ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                   data->status_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);
        irq_set_parent(virq, data->irq);
        irq_set_noprobe(virq);

        return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};

/**
 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
 * @data: Data for the &struct regmap_irq_chip
 * @base: Base register
 * @index: Register index
 *
 * Returns the register address corresponding to the given @base and @index
 * by the formula ``base + index * regmap_stride * irq_reg_stride``.
 */
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
                                           unsigned int base, int index)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;

        /*
         * FIXME: This is for backward compatibility and should be removed
         * when not_fixed_stride is dropped (it's only used by qcom-pm8008).
         */
        if (chip->not_fixed_stride && chip->sub_reg_offsets) {
                struct regmap_irq_sub_irq_map *subreg;

                subreg = &chip->sub_reg_offsets[0];
                return base + subreg->offset[0];
        }

        return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);

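/*
 * Chips whose IRQ registers are not laid out linearly can supply their own
 * ->get_irq_reg() callback instead. As a minimal sketch, for a hypothetical
 * layout (foo_get_irq_reg and the 0x10 offset are illustrative, not from a
 * real driver) where block 0 sits at the base address and all later blocks
 * live behind an extra fixed offset:
 *
 *      static unsigned int foo_get_irq_reg(struct regmap_irq_chip_data *data,
 *                                          unsigned int base, int index)
 *      {
 *              return index ? base + 0x10 + index : base;
 *      }
 *
 * and in the &struct regmap_irq_chip: .get_irq_reg = foo_get_irq_reg,
 */
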
/**
 * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
 * @buf: Buffer containing configuration register values. This is a 2D array of
 *       `num_config_bases` rows, each of `num_config_regs` elements.
 * @type: The requested IRQ type.
 * @irq_data: The IRQ being configured.
 * @idx: Index of the irq's config registers within each array `buf[i]`.
 *
 * This is a &struct regmap_irq_chip->set_type_config callback suitable for
 * chips with one config register. Register values are updated according to
 * the &struct regmap_irq_type data associated with an IRQ.
 */
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
                                      const struct regmap_irq *irq_data, int idx)
{
        const struct regmap_irq_type *t = &irq_data->type;

        if (t->type_reg_mask)
                buf[0][idx] &= ~t->type_reg_mask;
        else
                buf[0][idx] &= ~(t->type_falling_val |
                                 t->type_rising_val |
                                 t->type_level_low_val |
                                 t->type_level_high_val);

        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                buf[0][idx] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                buf[0][idx] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                buf[0][idx] |= (t->type_falling_val |
                                t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                buf[0][idx] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                buf[0][idx] |= t->type_level_low_val;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);

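/*
 * A chip with a single bank of config registers can plug this helper
 * straight into its description. A sketch (the field values below are
 * illustrative only, not from a real device):
 *
 *      .config_base = (const unsigned int []){ 0x20 },
 *      .num_config_bases = 1,
 *      .num_config_regs = 4,
 *      .set_type_config = regmap_irq_set_type_config_simple,
 */
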
/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node to which the IRQ domain should be added.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                               struct regmap *map, int irq,
                               int irq_flags, int irq_base,
                               const struct regmap_irq_chip *chip,
                               struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        int num_type_reg;
        int num_regs;
        u32 reg;

        if (chip->num_regs <= 0)
                return -EINVAL;

        if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
                return -EINVAL;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (chip->not_fixed_stride) {
                dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");

                for (i = 0; i < chip->num_regs; i++)
                        if (chip->sub_reg_offsets[i].num_regs != 1)
                                return -EINVAL;
        }

        if (chip->num_type_reg)
                dev_warn(map->dev, "type registers are deprecated; use config registers instead");

        if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt)
                dev_warn(map->dev, "virtual registers are deprecated; use config registers instead");

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        if (chip->num_main_regs) {
                d->main_status_buf = kcalloc(chip->num_main_regs,
                                             sizeof(*d->main_status_buf),
                                             GFP_KERNEL);

                if (!d->main_status_buf)
                        goto err_alloc;
        }

        d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        /*
         * Use num_config_regs if defined, otherwise fall back to num_type_reg
         * to maintain backward compatibility.
         */
        num_type_reg = chip->num_config_regs ? chip->num_config_regs
                        : chip->num_type_reg;
        num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg;
        if (num_regs) {
                d->type_buf_def = kcalloc(num_regs,
                                          sizeof(*d->type_buf_def), GFP_KERNEL);
                if (!d->type_buf_def)
                        goto err_alloc;

                d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf),
                                      GFP_KERNEL);
                if (!d->type_buf)
                        goto err_alloc;
        }

        if (chip->num_virt_regs) {
                /*
                 * Create virt_buf[chip->num_virt_regs][chip->num_regs]
                 */
                d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
                                      GFP_KERNEL);
                if (!d->virt_buf)
                        goto err_alloc;

                for (i = 0; i < chip->num_virt_regs; i++) {
                        d->virt_buf[i] = kcalloc(chip->num_regs,
                                                 sizeof(**d->virt_buf),
                                                 GFP_KERNEL);
                        if (!d->virt_buf[i])
                                goto err_alloc;
                }
        }

        if (chip->num_config_bases && chip->num_config_regs) {
                /*
                 * Create config_buf[num_config_bases][num_config_regs]
                 */
                d->config_buf = kcalloc(chip->num_config_bases,
                                        sizeof(*d->config_buf), GFP_KERNEL);
                if (!d->config_buf)
                        goto err_alloc;

                for (i = 0; i < chip->num_config_bases; i++) {
                        d->config_buf[i] = kcalloc(chip->num_config_regs,
                                                   sizeof(**d->config_buf),
                                                   GFP_KERNEL);
                        if (!d->config_buf[i])
                                goto err_alloc;
                }
        }

        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->mask_base && chip->unmask_base &&
            !chip->mask_unmask_non_inverted) {
                /*
                 * Chips that specify both mask_base and unmask_base used to
                 * get inverted mask behavior by default, with no way to ask
                 * for the normal, non-inverted behavior. This "inverted by
                 * default" behavior is deprecated, but we have to support it
                 * until existing drivers have been fixed.
                 *
                 * Existing drivers should be updated by swapping mask_base
                 * and unmask_base and setting mask_unmask_non_inverted=true.
                 * New drivers should always set the flag.
                 */
                dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");

                /* Might as well warn about mask_invert while we're at it... */
                if (chip->mask_invert)
                        dev_warn(map->dev, "mask_invert=true ignored");

                d->mask_base = chip->unmask_base;
                d->unmask_base = chip->mask_base;
        } else if (chip->mask_invert) {
                /*
                 * Swap the roles of mask_base and unmask_base if the bits are
                 * inverted. This is deprecated, drivers should use unmask_base
                 * directly.
                 */
                dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");

                d->mask_base = chip->unmask_base;
                d->unmask_base = chip->mask_base;
        } else {
                d->mask_base = chip->mask_base;
                d->unmask_base = chip->unmask_base;
        }

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        if (chip->get_irq_reg)
                d->get_irq_reg = chip->get_irq_reg;
        else
                d->get_irq_reg = regmap_irq_get_irq_reg_linear;

        if (regmap_irq_can_bulk_read_status(d)) {
                d->status_reg_buf = kmalloc_array(chip->num_regs,
                                                  map->format.val_bytes,
                                                  GFP_KERNEL);
                if (!d->status_reg_buf)
                        goto err_alloc;
        }

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];

                if (d->mask_base) {
                        reg = d->get_irq_reg(d, d->mask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], d->mask_buf[i]);
                        if (ret) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }

                if (d->unmask_base) {
                        reg = d->get_irq_reg(d, d->unmask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }

                if (!chip->init_ack_masked)
                        continue;

                /* Ack masked but set interrupts */
                reg = d->get_irq_reg(d, d->chip->status_base, i);
                ret = regmap_read(map, reg, &d->status_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto err_alloc;
                }

                if (chip->status_invert)
                        d->status_buf[i] = ~d->status_buf[i];

                if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = d->get_irq_reg(d, d->chip->ack_base, i);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                        ~(d->status_buf[i] & d->mask_buf[i]));
                        else
                                ret = regmap_write(map, reg,
                                        d->status_buf[i] & d->mask_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        /* Wake is disabled by default */
        if (d->wake_buf) {
                for (i = 0; i < chip->num_regs; i++) {
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = d->get_irq_reg(d, d->chip->wake_base, i);

                        if (chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         0);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (chip->num_type_reg && !chip->type_in_mask) {
                for (i = 0; i < chip->num_type_reg; ++i) {
                        reg = d->get_irq_reg(d, d->chip->type_base, i);

                        ret = regmap_read(map, reg, &d->type_buf_def[i]);

                        if (d->chip->type_invert)
                                d->type_buf_def[i] = ~d->type_buf_def[i];

                        if (ret) {
                                dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (irq_base)
                d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
                                                     irq_base, 0,
                                                     &regmap_domain_ops, d);
        else
                d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
                                                     &regmap_domain_ops, d);
        if (!d->domain) {
                dev_err(map->dev, "Failed to create IRQ domain\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
                                   irq_flags | IRQF_ONESHOT,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
                        irq, chip->name, ret);
                goto err_domain;
        }

        *data = d;

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d->status_reg_buf);
        if (d->virt_buf) {
                for (i = 0; i < chip->num_virt_regs; i++)
                        kfree(d->virt_buf[i]);
                kfree(d->virt_buf);
        }
        if (d->config_buf) {
                for (i = 0; i < chip->num_config_bases; i++)
                        kfree(d->config_buf[i]);
                kfree(d->config_buf);
        }
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode(), except that the firmware
 * node of the regmap's device is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
                                          irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

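/*
 * A sketch of typical usage from a driver's probe path. All of the FOO_*
 * registers, foo_irqs and foo_irq_chip below are hypothetical and only
 * illustrate how the pieces fit together:
 *
 *      static const struct regmap_irq foo_irqs[] = {
 *              REGMAP_IRQ_REG(0, 0, BIT(0)),
 *              REGMAP_IRQ_REG(1, 0, BIT(1)),
 *      };
 *
 *      static const struct regmap_irq_chip foo_irq_chip = {
 *              .name = "foo",
 *              .status_base = FOO_REG_INT_STATUS,
 *              .mask_base = FOO_REG_INT_MASK,
 *              .ack_base = FOO_REG_INT_CLEAR,
 *              .num_regs = 1,
 *              .irqs = foo_irqs,
 *              .num_irqs = ARRAY_SIZE(foo_irqs),
 *      };
 *
 *      ret = regmap_add_irq_chip(map, irq, IRQF_TRIGGER_LOW, 0,
 *                                &foo_irq_chip, &irq_data);
 */
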
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device.
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        unsigned int virq;
        int i, hwirq;

        if (!d)
                return;

        free_irq(irq, d);

        /* Dispose of all virtual IRQs from the IRQ domain before removing it */
        for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
                /* Skip hwirqs that are holes in the IRQ list */
                if (!d->chip->irqs[hwirq].mask)
                        continue;

                /*
                 * Find the virtual IRQ of the hwirq on the chip and, if
                 * there is one, dispose of it.
                 */
                virq = irq_find_mapping(d->domain, hwirq);
                if (virq)
                        irq_dispose_mapping(virq);
        }

        irq_domain_remove(d->domain);
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_reg_buf);
        kfree(d->status_buf);
        if (d->config_buf) {
                for (i = 0; i < d->chip->num_config_bases; i++)
                        kfree(d->config_buf[i]);
                kfree(d->config_buf);
        }
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
        struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

        regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
        struct regmap_irq_chip_data **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }
        return *r == data;
}

/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device the irq_chip belongs to.
 * @fwnode: The firmware node to which the IRQ domain should be added.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
                                    struct fwnode_handle *fwnode,
                                    struct regmap *map, int irq,
                                    int irq_flags, int irq_base,
                                    const struct regmap_irq_chip *chip,
                                    struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data **ptr, *d;
        int ret;

        ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
                                         chip, &d);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        *ptr = d;
        devres_add(dev, ptr);
        *data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device the irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
                             int irq_flags, int irq_base,
                             const struct regmap_irq_chip *chip,
                             struct regmap_irq_chip_data **data)
{
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
                                               irq, irq_flags, irq_base, chip,
                                               data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
                              struct regmap_irq_chip_data *data)
{
        int rc;

        WARN_ON(irq != data->irq);
        rc = devres_release(dev, devm_regmap_irq_chip_release,
                            devm_regmap_irq_chip_match, data);

        if (rc != 0)
                WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

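/*
 * For example, a function driver built on top of a regmap IRQ chip might map
 * one of its hardware IRQ indices to a virq and request it. A sketch, where
 * FOO_IRQ_ALARM and foo_alarm_handler are hypothetical:
 *
 *      virq = regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM);
 *      if (virq < 0)
 *              return virq;
 *
 *      ret = devm_request_threaded_irq(dev, virq, NULL, foo_alarm_handler,
 *                                      IRQF_ONESHOT, "foo-alarm", foo);
 */
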
/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems. For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
        if (data)
                return data->domain;
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);