/* drivers/gpio/gpio-virtio.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * GPIO driver for virtio-based virtual GPIO controllers
  4. *
  5. * Copyright (C) 2021 metux IT consult
  6. * Enrico Weigelt, metux IT consult <[email protected]>
  7. *
  8. * Copyright (C) 2021 Linaro.
  9. * Viresh Kumar <[email protected]>
  10. */
  11. #include <linux/completion.h>
  12. #include <linux/err.h>
  13. #include <linux/gpio/driver.h>
  14. #include <linux/io.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/mutex.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/virtio_config.h>
  20. #include <uapi/linux/virtio_gpio.h>
  21. #include <uapi/linux/virtio_ids.h>
/* Per-line state for the request virtqueue */
struct virtio_gpio_line {
	struct mutex lock; /* Protects line operation */
	struct completion completion;	/* Signalled when the response buffer is returned */
	/*
	 * Pre-allocated request/response buffers, cacheline aligned so the
	 * device-writable response doesn't share a cacheline with the rest
	 * of the structure.
	 */
	struct virtio_gpio_request req ____cacheline_aligned;
	struct virtio_gpio_response res ____cacheline_aligned;
	unsigned int rxlen;	/* Length of the last response, set by the vq callback */
};
/* Per-line interrupt state, protected by virtio_gpio::eventq_lock */
struct vgpio_irq_line {
	u8 type;		/* Configured VIRTIO_GPIO_IRQ_TYPE_* value */
	bool disabled;		/* irq_disable() was called for this line */
	bool masked;		/* Line is currently masked */
	bool queued;		/* An event buffer is outstanding on the eventq */
	bool update_pending;	/* Device must be updated at bus_sync_unlock */
	bool queue_pending;	/* Buffer must be (re)queued once enabled */
	/* Pre-allocated eventq buffers, cacheline aligned for device writes */
	struct virtio_gpio_irq_request ireq ____cacheline_aligned;
	struct virtio_gpio_irq_response ires ____cacheline_aligned;
};
/* Driver instance state, one per virtio GPIO device */
struct virtio_gpio {
	struct virtio_device *vdev;		/* Parent virtio device */
	struct mutex lock; /* Protects virtqueue operation */
	struct gpio_chip gc;			/* Registered GPIO controller */
	struct virtio_gpio_line *lines;		/* One entry per GPIO line */
	struct virtqueue *request_vq;		/* "requestq" */

	/* irq support */
	struct virtqueue *event_vq;		/* "eventq" */
	struct mutex irq_lock; /* Protects irq operation */
	raw_spinlock_t eventq_lock; /* Protects queuing of the buffer */
	struct vgpio_irq_line *irq_lines;	/* NULL unless VIRTIO_GPIO_F_IRQ */
};
/*
 * Send a request for @gpio over the request virtqueue and wait for its
 * response.
 *
 * @type:     VIRTIO_GPIO_MSG_* request type
 * @gpio:     line number the request applies to
 * @txvalue:  value transmitted with the request
 * @rxvalue:  if non-NULL, receives the response value on success
 * @response: device-writable response buffer of at least @rxlen bytes
 * @rxlen:    expected response length
 *
 * Returns 0 on success, -EINVAL on a device-reported failure or a short
 * response, or the error from virtqueue_add_sgs(). May sleep.
 */
static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
			    u8 txvalue, u8 *rxvalue, void *response, u32 rxlen)
{
	struct virtio_gpio_line *line = &vgpio->lines[gpio];
	struct virtio_gpio_request *req = &line->req;
	struct virtio_gpio_response *res = response;
	struct scatterlist *sgs[2], req_sg, res_sg;
	struct device *dev = &vgpio->vdev->dev;
	int ret;

	/*
	 * Prevent concurrent requests for the same line since we have
	 * pre-allocated request/response buffers for each GPIO line. Moreover
	 * Linux always accesses a GPIO line sequentially, so this locking shall
	 * always go through without any delays.
	 */
	mutex_lock(&line->lock);

	/* Multi-byte fields are little-endian on the wire */
	req->type = cpu_to_le16(type);
	req->gpio = cpu_to_le16(gpio);
	req->value = cpu_to_le32(txvalue);

	sg_init_one(&req_sg, req, sizeof(*req));
	sg_init_one(&res_sg, res, rxlen);
	sgs[0] = &req_sg;
	sgs[1] = &res_sg;

	line->rxlen = 0;
	reinit_completion(&line->completion);

	/*
	 * Virtqueue callers need to ensure they don't call its APIs with other
	 * virtqueue operations at the same time.
	 */
	mutex_lock(&vgpio->lock);
	ret = virtqueue_add_sgs(vgpio->request_vq, sgs, 1, 1, line, GFP_KERNEL);
	if (ret) {
		dev_err(dev, "failed to add request to vq\n");
		mutex_unlock(&vgpio->lock);
		goto out;
	}

	virtqueue_kick(vgpio->request_vq);
	mutex_unlock(&vgpio->lock);

	/* Completed by virtio_gpio_request_vq() when the buffer is returned */
	wait_for_completion(&line->completion);

	if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
		dev_err(dev, "GPIO request failed: %d\n", gpio);
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(line->rxlen != rxlen)) {
		dev_err(dev, "GPIO operation returned incorrect len (%u : %u)\n",
			rxlen, line->rxlen);
		ret = -EINVAL;
		goto out;
	}

	if (rxvalue)
		*rxvalue = res->value;

out:
	mutex_unlock(&line->lock);
	return ret;
}
  107. static int virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
  108. u8 txvalue, u8 *rxvalue)
  109. {
  110. struct virtio_gpio_line *line = &vgpio->lines[gpio];
  111. struct virtio_gpio_response *res = &line->res;
  112. return _virtio_gpio_req(vgpio, type, gpio, txvalue, rxvalue, res,
  113. sizeof(*res));
  114. }
  115. static void virtio_gpio_free(struct gpio_chip *gc, unsigned int gpio)
  116. {
  117. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  118. virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
  119. VIRTIO_GPIO_DIRECTION_NONE, NULL);
  120. }
  121. static int virtio_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
  122. {
  123. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  124. u8 direction;
  125. int ret;
  126. ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_DIRECTION, gpio, 0,
  127. &direction);
  128. if (ret)
  129. return ret;
  130. switch (direction) {
  131. case VIRTIO_GPIO_DIRECTION_IN:
  132. return GPIO_LINE_DIRECTION_IN;
  133. case VIRTIO_GPIO_DIRECTION_OUT:
  134. return GPIO_LINE_DIRECTION_OUT;
  135. default:
  136. return -EINVAL;
  137. }
  138. }
  139. static int virtio_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
  140. {
  141. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  142. return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
  143. VIRTIO_GPIO_DIRECTION_IN, NULL);
  144. }
  145. static int virtio_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
  146. int value)
  147. {
  148. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  149. int ret;
  150. ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
  151. if (ret)
  152. return ret;
  153. return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
  154. VIRTIO_GPIO_DIRECTION_OUT, NULL);
  155. }
  156. static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
  157. {
  158. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  159. u8 value;
  160. int ret;
  161. ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_VALUE, gpio, 0, &value);
  162. return ret ? ret : value;
  163. }
  164. static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
  165. {
  166. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  167. virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
  168. }
/* Interrupt handling */

/*
 * Queue the line's pre-allocated irq request/response buffer pair on the
 * eventq. The device holds the buffer until the interrupt fires, at which
 * point virtio_gpio_event_vq() gets it back.
 *
 * Must be called with vgpio->eventq_lock held (hence GFP_ATOMIC), and only
 * for a line that is enabled, unmasked and has no buffer outstanding.
 */
static void virtio_gpio_irq_prepare(struct virtio_gpio *vgpio, u16 gpio)
{
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[gpio];
	struct virtio_gpio_irq_request *ireq = &irq_line->ireq;
	struct virtio_gpio_irq_response *ires = &irq_line->ires;
	struct scatterlist *sgs[2], req_sg, res_sg;
	int ret;

	/* Only one outstanding buffer per line, and only while enabled */
	if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled))
		return;

	ireq->gpio = cpu_to_le16(gpio);
	sg_init_one(&req_sg, ireq, sizeof(*ireq));
	sg_init_one(&res_sg, ires, sizeof(*ires));
	sgs[0] = &req_sg;
	sgs[1] = &res_sg;

	ret = virtqueue_add_sgs(vgpio->event_vq, sgs, 1, 1, irq_line, GFP_ATOMIC);
	if (ret) {
		dev_err(&vgpio->vdev->dev, "failed to add request to eventq\n");
		return;
	}

	irq_line->queued = true;
	virtqueue_kick(vgpio->event_vq);
}
  192. static void virtio_gpio_irq_enable(struct irq_data *d)
  193. {
  194. struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
  195. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  196. struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
  197. raw_spin_lock(&vgpio->eventq_lock);
  198. irq_line->disabled = false;
  199. irq_line->masked = false;
  200. irq_line->queue_pending = true;
  201. raw_spin_unlock(&vgpio->eventq_lock);
  202. irq_line->update_pending = true;
  203. }
  204. static void virtio_gpio_irq_disable(struct irq_data *d)
  205. {
  206. struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
  207. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  208. struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
  209. raw_spin_lock(&vgpio->eventq_lock);
  210. irq_line->disabled = true;
  211. irq_line->masked = true;
  212. irq_line->queue_pending = false;
  213. raw_spin_unlock(&vgpio->eventq_lock);
  214. irq_line->update_pending = true;
  215. }
  216. static void virtio_gpio_irq_mask(struct irq_data *d)
  217. {
  218. struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
  219. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  220. struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
  221. raw_spin_lock(&vgpio->eventq_lock);
  222. irq_line->masked = true;
  223. raw_spin_unlock(&vgpio->eventq_lock);
  224. }
  225. static void virtio_gpio_irq_unmask(struct irq_data *d)
  226. {
  227. struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
  228. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  229. struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
  230. raw_spin_lock(&vgpio->eventq_lock);
  231. irq_line->masked = false;
  232. /* Queue the buffer unconditionally on unmask */
  233. virtio_gpio_irq_prepare(vgpio, d->hwirq);
  234. raw_spin_unlock(&vgpio->eventq_lock);
  235. }
  236. static int virtio_gpio_irq_set_type(struct irq_data *d, unsigned int type)
  237. {
  238. struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
  239. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  240. struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
  241. switch (type) {
  242. case IRQ_TYPE_EDGE_RISING:
  243. type = VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING;
  244. break;
  245. case IRQ_TYPE_EDGE_FALLING:
  246. type = VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING;
  247. break;
  248. case IRQ_TYPE_EDGE_BOTH:
  249. type = VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH;
  250. break;
  251. case IRQ_TYPE_LEVEL_LOW:
  252. type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW;
  253. break;
  254. case IRQ_TYPE_LEVEL_HIGH:
  255. type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH;
  256. break;
  257. default:
  258. dev_err(&vgpio->vdev->dev, "unsupported irq type: %u\n", type);
  259. return -EINVAL;
  260. }
  261. irq_line->type = type;
  262. irq_line->update_pending = true;
  263. return 0;
  264. }
  265. static void virtio_gpio_irq_bus_lock(struct irq_data *d)
  266. {
  267. struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
  268. struct virtio_gpio *vgpio = gpiochip_get_data(gc);
  269. mutex_lock(&vgpio->irq_lock);
  270. }
/*
 * irq_chip ->irq_bus_sync_unlock: push any cached irq configuration change
 * to the device. This runs in sleepable context, outside the raw locks of
 * the fast irq_chip callbacks, which is why those callbacks only record
 * state and set update_pending instead of talking to the device directly.
 */
static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
	/* A disabled line is reported to the device as TYPE_NONE */
	u8 type = irq_line->disabled ? VIRTIO_GPIO_IRQ_TYPE_NONE : irq_line->type;
	unsigned long flags;

	if (irq_line->update_pending) {
		irq_line->update_pending = false;
		virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_IRQ_TYPE, d->hwirq, type,
				NULL);

		/* Queue the buffer only after interrupt is enabled */
		raw_spin_lock_irqsave(&vgpio->eventq_lock, flags);
		if (irq_line->queue_pending) {
			irq_line->queue_pending = false;
			virtio_gpio_irq_prepare(vgpio, d->hwirq);
		}
		raw_spin_unlock_irqrestore(&vgpio->eventq_lock, flags);
	}

	mutex_unlock(&vgpio->irq_lock);
}
/*
 * irq_chip for the virtual lines. The fast callbacks only cache state;
 * the device itself is updated from irq_bus_sync_unlock(), where the
 * (sleeping) request virtqueue may be used.
 */
static struct irq_chip vgpio_irq_chip = {
	.name = "virtio-gpio",
	.irq_enable = virtio_gpio_irq_enable,
	.irq_disable = virtio_gpio_irq_disable,
	.irq_mask = virtio_gpio_irq_mask,
	.irq_unmask = virtio_gpio_irq_unmask,
	.irq_set_type = virtio_gpio_irq_set_type,

	/* These are required to implement irqchip for slow busses */
	.irq_bus_lock = virtio_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock,
};
/*
 * Decide whether a returned eventq buffer should be delivered as an
 * interrupt. Returns true when the event must be dropped: the line is
 * currently masked or disabled, or the device flagged the buffer as
 * INVALID (returned because the irq was reconfigured), in which case the
 * buffer is simply requeued.
 */
static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
		       struct vgpio_irq_line *irq_line)
{
	bool ignore = false;

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->queued = false;

	/* Interrupt is disabled currently */
	if (irq_line->masked || irq_line->disabled) {
		ignore = true;
		goto unlock;
	}

	/*
	 * Buffer is returned as the interrupt was disabled earlier, but is
	 * enabled again now. Requeue the buffers.
	 */
	if (irq_line->ires.status == VIRTIO_GPIO_IRQ_STATUS_INVALID) {
		virtio_gpio_irq_prepare(vgpio, gpio);
		ignore = true;
		goto unlock;
	}

	/* Any other status is a protocol violation by the device */
	if (WARN_ON(irq_line->ires.status != VIRTIO_GPIO_IRQ_STATUS_VALID))
		ignore = true;

unlock:
	raw_spin_unlock(&vgpio->eventq_lock);

	return ignore;
}
  329. static void virtio_gpio_event_vq(struct virtqueue *vq)
  330. {
  331. struct virtio_gpio *vgpio = vq->vdev->priv;
  332. struct device *dev = &vgpio->vdev->dev;
  333. struct vgpio_irq_line *irq_line;
  334. int gpio, ret;
  335. unsigned int len;
  336. while (true) {
  337. irq_line = virtqueue_get_buf(vgpio->event_vq, &len);
  338. if (!irq_line)
  339. break;
  340. if (len != sizeof(irq_line->ires)) {
  341. dev_err(dev, "irq with incorrect length (%u : %u)\n",
  342. len, (unsigned int)sizeof(irq_line->ires));
  343. continue;
  344. }
  345. /*
  346. * Find GPIO line number from the offset of irq_line within the
  347. * irq_lines block. We can also get GPIO number from
  348. * irq-request, but better not to rely on a buffer returned by
  349. * remote.
  350. */
  351. gpio = irq_line - vgpio->irq_lines;
  352. WARN_ON(gpio >= vgpio->gc.ngpio);
  353. if (unlikely(ignore_irq(vgpio, gpio, irq_line)))
  354. continue;
  355. ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
  356. if (ret)
  357. dev_err(dev, "failed to handle interrupt: %d\n", ret);
  358. }
  359. }
  360. static void virtio_gpio_request_vq(struct virtqueue *vq)
  361. {
  362. struct virtio_gpio_line *line;
  363. unsigned int len;
  364. do {
  365. line = virtqueue_get_buf(vq, &len);
  366. if (!line)
  367. return;
  368. line->rxlen = len;
  369. complete(&line->completion);
  370. } while (1);
  371. }
/*
 * Tear down the virtqueues. The device must be reset first so it stops
 * using the queues (and our pre-allocated buffers) before they go away.
 */
static void virtio_gpio_free_vqs(struct virtio_device *vdev)
{
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}
/*
 * Create the request virtqueue and, when irq support was negotiated
 * (vgpio->irq_lines is non-NULL), the event virtqueue as well.
 * Returns 0 on success or a negative errno; any partially-created vqs are
 * torn down again on failure.
 */
static int virtio_gpio_alloc_vqs(struct virtio_gpio *vgpio,
				 struct virtio_device *vdev)
{
	const char * const names[] = { "requestq", "eventq" };
	vq_callback_t *cbs[] = {
		virtio_gpio_request_vq,
		virtio_gpio_event_vq,
	};
	struct virtqueue *vqs[2] = { NULL, NULL };
	int ret;

	/* Only ask for the eventq when irq support is enabled */
	ret = virtio_find_vqs(vdev, vgpio->irq_lines ? 2 : 1, vqs, cbs, names, NULL);
	if (ret) {
		dev_err(&vdev->dev, "failed to find vqs: %d\n", ret);
		return ret;
	}

	if (!vqs[0]) {
		dev_err(&vdev->dev, "failed to find requestq vq\n");
		goto out;
	}
	vgpio->request_vq = vqs[0];

	if (vgpio->irq_lines && !vqs[1]) {
		dev_err(&vdev->dev, "failed to find eventq vq\n");
		goto out;
	}
	vgpio->event_vq = vqs[1];

	return 0;

out:
	if (vqs[0] || vqs[1])
		virtio_gpio_free_vqs(vdev);

	return -ENODEV;
}
/*
 * Fetch the line-names block from the device and build an array of one
 * name pointer per line for gpio_chip::names.
 *
 * @gpio_names_size: size of the names block advertised in config space
 * @ngpio: number of GPIO lines
 *
 * Returns a devm-allocated array of @ngpio pointers into the (also
 * devm-allocated) names buffer, or NULL when names are unavailable or the
 * device returned a malformed block. All failures here are non-fatal: the
 * chip is simply registered without line names.
 */
static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
					  u32 gpio_names_size, u16 ngpio)
{
	struct virtio_gpio_response_get_names *res;
	struct device *dev = &vgpio->vdev->dev;
	u8 *gpio_names, *str;
	const char **names;
	int i, ret, len;

	if (!gpio_names_size)
		return NULL;

	len = sizeof(*res) + gpio_names_size;
	res = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!res)
		return NULL;
	gpio_names = res->value;

	/* Raw request path: this response is larger than a line's res buffer */
	ret = _virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_NAMES, 0, 0, NULL,
			       res, len);
	if (ret) {
		dev_err(dev, "Failed to get GPIO names: %d\n", ret);
		return NULL;
	}

	names = devm_kcalloc(dev, ngpio, sizeof(*names), GFP_KERNEL);
	if (!names)
		return NULL;

	/* NULL terminate the string instead of checking it */
	gpio_names[gpio_names_size - 1] = '\0';

	/* Walk the NUL-separated names, one per line, validating the bounds */
	for (i = 0, str = gpio_names; i < ngpio; i++) {
		names[i] = str;
		str += strlen(str) + 1; /* zero-length strings are allowed */

		if (str > gpio_names + gpio_names_size) {
			dev_err(dev, "gpio_names block is too short (%d)\n", i);
			return NULL;
		}
	}

	return names;
}
/*
 * Probe a virtio GPIO device: read its config space, set up the gpio_chip
 * callbacks (and the irq machinery when VIRTIO_GPIO_F_IRQ was negotiated),
 * allocate the virtqueues and register the controller.
 */
static int virtio_gpio_probe(struct virtio_device *vdev)
{
	struct virtio_gpio_config config;
	struct device *dev = &vdev->dev;
	struct virtio_gpio *vgpio;
	u32 gpio_names_size;
	u16 ngpio;
	int ret, i;

	vgpio = devm_kzalloc(dev, sizeof(*vgpio), GFP_KERNEL);
	if (!vgpio)
		return -ENOMEM;

	/* Read configuration */
	virtio_cread_bytes(vdev, 0, &config, sizeof(config));
	/* Config space is little-endian per the virtio spec */
	gpio_names_size = le32_to_cpu(config.gpio_names_size);
	ngpio = le16_to_cpu(config.ngpio);
	if (!ngpio) {
		dev_err(dev, "Number of GPIOs can't be zero\n");
		return -EINVAL;
	}

	vgpio->lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->lines), GFP_KERNEL);
	if (!vgpio->lines)
		return -ENOMEM;

	for (i = 0; i < ngpio; i++) {
		mutex_init(&vgpio->lines[i].lock);
		init_completion(&vgpio->lines[i].completion);
	}

	mutex_init(&vgpio->lock);
	vdev->priv = vgpio;

	/* All operations go over the virtqueue and may sleep */
	vgpio->vdev = vdev;
	vgpio->gc.free = virtio_gpio_free;
	vgpio->gc.get_direction = virtio_gpio_get_direction;
	vgpio->gc.direction_input = virtio_gpio_direction_input;
	vgpio->gc.direction_output = virtio_gpio_direction_output;
	vgpio->gc.get = virtio_gpio_get;
	vgpio->gc.set = virtio_gpio_set;
	vgpio->gc.ngpio = ngpio;
	vgpio->gc.base = -1; /* Allocate base dynamically */
	vgpio->gc.label = dev_name(dev);
	vgpio->gc.parent = dev;
	vgpio->gc.owner = THIS_MODULE;
	vgpio->gc.can_sleep = true;

	/* Interrupt support */
	if (virtio_has_feature(vdev, VIRTIO_GPIO_F_IRQ)) {
		vgpio->irq_lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->irq_lines), GFP_KERNEL);
		if (!vgpio->irq_lines)
			return -ENOMEM;

		/* The event comes from the outside so no parent handler */
		vgpio->gc.irq.parent_handler = NULL;
		vgpio->gc.irq.num_parents = 0;
		vgpio->gc.irq.parents = NULL;
		vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
		vgpio->gc.irq.handler = handle_level_irq;
		vgpio->gc.irq.chip = &vgpio_irq_chip;

		/* Lines start out disabled and masked until requested */
		for (i = 0; i < ngpio; i++) {
			vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
			vgpio->irq_lines[i].disabled = true;
			vgpio->irq_lines[i].masked = true;
		}

		mutex_init(&vgpio->irq_lock);
		raw_spin_lock_init(&vgpio->eventq_lock);
	}

	ret = virtio_gpio_alloc_vqs(vgpio, vdev);
	if (ret)
		return ret;

	/* Mark the device ready to perform operations from within probe() */
	virtio_device_ready(vdev);

	/* Name lookup is best effort; a NULL result is fine */
	vgpio->gc.names = virtio_gpio_get_names(vgpio, gpio_names_size, ngpio);

	ret = gpiochip_add_data(&vgpio->gc, vgpio);
	if (ret) {
		virtio_gpio_free_vqs(vdev);
		dev_err(dev, "Failed to add virtio-gpio controller\n");
	}

	return ret;
}
/*
 * Device teardown: unregister the gpiochip first so no new requests can be
 * issued, then reset the device and free the virtqueues.
 */
static void virtio_gpio_remove(struct virtio_device *vdev)
{
	struct virtio_gpio *vgpio = vdev->priv;

	gpiochip_remove(&vgpio->gc);
	virtio_gpio_free_vqs(vdev);
}
/* Match any device advertising the virtio GPIO device ID */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_GPIO, VIRTIO_DEV_ANY_ID },
	{},
};
MODULE_DEVICE_TABLE(virtio, id_table);

/* Optional feature bits this driver is willing to negotiate */
static const unsigned int features[] = {
	VIRTIO_GPIO_F_IRQ,
};

static struct virtio_driver virtio_gpio_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtio_gpio_probe,
	.remove = virtio_gpio_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
};
module_virtio_driver(virtio_gpio_driver);

MODULE_AUTHOR("Enrico Weigelt, metux IT consult <[email protected]>");
MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_DESCRIPTION("VirtIO GPIO driver");
MODULE_LICENSE("GPL");