efct_io.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_io.h"

struct efct_io_pool {
	struct efct *efct;
	spinlock_t lock;	/* IO pool lock */
	u32 io_num_ios;		/* Total IOs allocated */
	struct efct_io *ios[EFCT_NUM_SCSI_IOS];
	struct list_head freelist;
};
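
/*
 * Allocate the pool of SCSI IO objects. Each IO gets a DMA-coherent
 * response buffer and an SGL sized for num_sgl entries, and is placed on
 * the pool's freelist. On any allocation failure the partially built pool
 * is torn down with efct_io_pool_free() and NULL is returned.
 */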
struct efct_io_pool *
efct_io_pool_create(struct efct *efct, u32 num_sgl)
{
	u32 i = 0;
	struct efct_io_pool *io_pool;
	struct efct_io *io;

	/* Allocate the IO pool */
	io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
	if (!io_pool)
		return NULL;

	io_pool->efct = efct;
	INIT_LIST_HEAD(&io_pool->freelist);
	/* initialize IO pool lock */
	spin_lock_init(&io_pool->lock);

	for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
		io = kzalloc(sizeof(*io), GFP_KERNEL);
		if (!io)
			break;

		io_pool->io_num_ios++;
		io_pool->ios[i] = io;
		io->tag = i;
		io->instance_index = i;

		/* Allocate a response buffer */
		io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
		io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
						     io->rspbuf.size,
						     &io->rspbuf.phys, GFP_KERNEL);
		if (!io->rspbuf.virt) {
			efc_log_err(efct, "dma_alloc rspbuf failed\n");
			efct_io_pool_free(io_pool);
			return NULL;
		}

		/* Allocate SGL */
		io->sgl = kzalloc(sizeof(*io->sgl) * num_sgl, GFP_KERNEL);
		if (!io->sgl) {
			efct_io_pool_free(io_pool);
			return NULL;
		}

		io->sgl_allocated = num_sgl;
		io->sgl_count = 0;

		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &io_pool->freelist);
	}

	return io_pool;
}
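
/*
 * Tear down the IO pool: free each IO's SGL and DMA response buffer, then
 * free the pool itself and clear the transport's io_pool pointer.
 */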
int
efct_io_pool_free(struct efct_io_pool *io_pool)
{
	struct efct *efct;
	u32 i;
	struct efct_io *io;

	if (io_pool) {
		efct = io_pool->efct;

		for (i = 0; i < io_pool->io_num_ios; i++) {
			io = io_pool->ios[i];
			if (!io)
				continue;

			kfree(io->sgl);
			dma_free_coherent(&efct->pci->dev,
					  io->rspbuf.size, io->rspbuf.virt,
					  io->rspbuf.phys);
			memset(&io->rspbuf, 0, sizeof(struct efc_dma));
		}

		kfree(io_pool);
		efct->xport->io_pool = NULL;
	}

	return 0;
}
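
/*
 * Take an IO off the pool's freelist (under the pool lock) and reset its
 * per-command state. Returns NULL if the freelist is empty.
 */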
struct efct_io *
efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
{
	struct efct_io *io = NULL;
	struct efct *efct;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);

	if (!list_empty(&io_pool->freelist)) {
		io = list_first_entry(&io_pool->freelist, struct efct_io,
				      list_entry);
		list_del_init(&io->list_entry);
	}

	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (!io)
		return NULL;

	io->io_type = EFCT_IO_TYPE_MAX;
	io->hio_type = EFCT_HW_IO_MAX;
	io->hio = NULL;
	io->transferred = 0;
	io->efct = efct;
	io->timeout = 0;
	io->sgl_count = 0;
	io->tgt_task_tag = 0;
	io->init_task_tag = 0;
	io->hw_tag = 0;
	io->display_name = "pending";
	io->seq_init = 0;
	io->io_free = 0;
	io->release = NULL;
	atomic_add_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_alloc);
	return io;
}

/*
 * Free an object used to track an IO. Any HW IO still attached is detached
 * under the pool lock and released afterwards, so efct_hw_io_free() is not
 * called with the lock held.
 */
void
efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
{
	struct efct *efct;
	struct efct_hw_io *hio = NULL;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);
	hio = io->hio;
	io->hio = NULL;
	io->io_free = 1;
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, &io_pool->freelist);
	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (hio)
		efct_hw_io_free(&efct->hw, hio);

	atomic_sub_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_free);
}

/*
 * Find an I/O given its node and ox_id. A reference is taken on the matched
 * IO via kref_get_unless_zero(); the caller is responsible for releasing it.
 */
struct efct_io *
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
		    u16 ox_id, u16 rx_id)
{
	struct efct_io *io = NULL;
	unsigned long flags = 0;
	u8 found = false;

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_for_each_entry(io, &node->active_ios, list_entry) {
		if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
		    (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
			if (kref_get_unless_zero(&io->ref))
				found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	return found ? io : NULL;
}