// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include "ocxl_internal.h"
#include "trace.h"
/*
 * Per-AFU-interrupt bookkeeping. One instance per interrupt allocated
 * on a context; entries live in ctx->irq_idr and are protected by
 * ctx->irq_lock.
 */
struct afu_irq {
	int id;			/* slot index in the context's irq_idr */
	int hw_irq;		/* hardware irq from ocxl_link_irq_alloc() */
	unsigned int virq;	/* Linux virtual irq from irq_create_mapping() */
	char *name;		/* kasprintf'd name handed to request_irq(); freed on release */
	irqreturn_t (*handler)(void *private);	/* optional consumer callback (may be NULL) */
	void (*free_private)(void *private);	/* optional destructor for 'private' */
	void *private;		/* opaque cookie passed to handler/free_private */
};
  18. int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
  19. {
  20. return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
  21. }
  22. u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id)
  23. {
  24. return ctx->afu->irq_base_offset + (irq_id << PAGE_SHIFT);
  25. }
  26. int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
  27. irqreturn_t (*handler)(void *private),
  28. void (*free_private)(void *private),
  29. void *private)
  30. {
  31. struct afu_irq *irq;
  32. int rc;
  33. mutex_lock(&ctx->irq_lock);
  34. irq = idr_find(&ctx->irq_idr, irq_id);
  35. if (!irq) {
  36. rc = -EINVAL;
  37. goto unlock;
  38. }
  39. irq->handler = handler;
  40. irq->private = private;
  41. irq->free_private = free_private;
  42. rc = 0;
  43. // Fall through to unlock
  44. unlock:
  45. mutex_unlock(&ctx->irq_lock);
  46. return rc;
  47. }
  48. EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);
  49. static irqreturn_t afu_irq_handler(int virq, void *data)
  50. {
  51. struct afu_irq *irq = (struct afu_irq *) data;
  52. trace_ocxl_afu_irq_receive(virq);
  53. if (irq->handler)
  54. return irq->handler(irq->private);
  55. return IRQ_HANDLED; // Just drop it on the ground
  56. }
  57. static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
  58. {
  59. int rc;
  60. irq->virq = irq_create_mapping(NULL, irq->hw_irq);
  61. if (!irq->virq) {
  62. pr_err("irq_create_mapping failed\n");
  63. return -ENOMEM;
  64. }
  65. pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq);
  66. irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq);
  67. if (!irq->name) {
  68. irq_dispose_mapping(irq->virq);
  69. return -ENOMEM;
  70. }
  71. rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq);
  72. if (rc) {
  73. kfree(irq->name);
  74. irq->name = NULL;
  75. irq_dispose_mapping(irq->virq);
  76. pr_err("request_irq failed: %d\n", rc);
  77. return rc;
  78. }
  79. return 0;
  80. }
/*
 * Tear down the Linux-side resources acquired by setup_afu_irq().
 * Order matters: free_irq() must precede irq_dispose_mapping(), and
 * irq->name can only be freed last because request_irq() was given a
 * pointer to it.
 */
static void release_afu_irq(struct afu_irq *irq)
{
	free_irq(irq->virq, irq);
	irq_dispose_mapping(irq->virq);
	kfree(irq->name);
}
  87. int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id)
  88. {
  89. struct afu_irq *irq;
  90. int rc;
  91. irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL);
  92. if (!irq)
  93. return -ENOMEM;
  94. /*
  95. * We limit the number of afu irqs per context and per link to
  96. * avoid a single process or user depleting the pool of IPIs
  97. */
  98. mutex_lock(&ctx->irq_lock);
  99. irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT,
  100. GFP_KERNEL);
  101. if (irq->id < 0) {
  102. rc = -ENOSPC;
  103. goto err_unlock;
  104. }
  105. rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq);
  106. if (rc)
  107. goto err_idr;
  108. rc = setup_afu_irq(ctx, irq);
  109. if (rc)
  110. goto err_alloc;
  111. trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq);
  112. mutex_unlock(&ctx->irq_lock);
  113. *irq_id = irq->id;
  114. return 0;
  115. err_alloc:
  116. ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
  117. err_idr:
  118. idr_remove(&ctx->irq_idr, irq->id);
  119. err_unlock:
  120. mutex_unlock(&ctx->irq_lock);
  121. kfree(irq);
  122. return rc;
  123. }
  124. EXPORT_SYMBOL_GPL(ocxl_afu_irq_alloc);
/*
 * Release one AFU interrupt and its resources. Both callers hold
 * ctx->irq_lock when invoking this.
 */
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
	trace_ocxl_afu_irq_free(ctx->pasid, irq->id);
	// Invalidate any userspace mmap of this interrupt's page first
	if (ctx->mapping)
		unmap_mapping_range(ctx->mapping,
				ocxl_irq_id_to_offset(ctx, irq->id),
				1 << PAGE_SHIFT, 1);
	// Unhook the Linux irq before running the consumer's destructor,
	// so the handler can no longer fire on freed private data
	release_afu_irq(irq);
	if (irq->free_private)
		irq->free_private(irq->private);
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
	kfree(irq);
}
  138. int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id)
  139. {
  140. struct afu_irq *irq;
  141. mutex_lock(&ctx->irq_lock);
  142. irq = idr_find(&ctx->irq_idr, irq_id);
  143. if (!irq) {
  144. mutex_unlock(&ctx->irq_lock);
  145. return -EINVAL;
  146. }
  147. idr_remove(&ctx->irq_idr, irq->id);
  148. afu_irq_free(irq, ctx);
  149. mutex_unlock(&ctx->irq_lock);
  150. return 0;
  151. }
  152. EXPORT_SYMBOL_GPL(ocxl_afu_irq_free);
/*
 * Free every interrupt still allocated on the context (context
 * teardown path). Entries are not individually removed from the idr
 * here — NOTE(review): presumably the idr itself is destroyed by the
 * caller; verify against the context release path.
 */
void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
	struct afu_irq *irq;
	int id;

	mutex_lock(&ctx->irq_lock);
	idr_for_each_entry(&ctx->irq_idr, irq, id)
		afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
}
  162. u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id)
  163. {
  164. struct xive_irq_data *xd;
  165. struct afu_irq *irq;
  166. u64 addr = 0;
  167. mutex_lock(&ctx->irq_lock);
  168. irq = idr_find(&ctx->irq_idr, irq_id);
  169. if (irq) {
  170. xd = irq_get_handler_data(irq->virq);
  171. addr = xd ? xd->trig_page : 0;
  172. }
  173. mutex_unlock(&ctx->irq_lock);
  174. return addr;
  175. }
  176. EXPORT_SYMBOL_GPL(ocxl_afu_irq_get_addr);