  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic Fibre Channel HBA Driver
  4. * Copyright (c) 2003-2014 QLogic Corporation
  5. */
  6. #include "qla_def.h"
  7. #include <linux/debugfs.h>
  8. #include <linux/seq_file.h>
/* Driver-wide debugfs root directory ("qla2xxx"), shared by all adapters. */
static struct dentry *qla2x00_dfs_root;
/* Number of per-HBA directories currently hanging off the root. */
static atomic_t qla2x00_dfs_root_count;

/* Attribute IDs dispatched by qla_dfs_rport_get()/qla_dfs_rport_set(). */
#define QLA_DFS_RPORT_DEVLOSS_TMO 1
  12. static int
  13. qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
  14. {
  15. switch (attr_id) {
  16. case QLA_DFS_RPORT_DEVLOSS_TMO:
  17. /* Only supported for FC-NVMe devices that are registered. */
  18. if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
  19. return -EIO;
  20. *val = fp->nvme_remote_port->dev_loss_tmo;
  21. break;
  22. default:
  23. return -EINVAL;
  24. }
  25. return 0;
  26. }
  27. static int
  28. qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
  29. {
  30. switch (attr_id) {
  31. case QLA_DFS_RPORT_DEVLOSS_TMO:
  32. /* Only supported for FC-NVMe devices that are registered. */
  33. if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
  34. return -EIO;
  35. #if (IS_ENABLED(CONFIG_NVME_FC))
  36. return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
  37. val);
  38. #else /* CONFIG_NVME_FC */
  39. return -EINVAL;
  40. #endif /* CONFIG_NVME_FC */
  41. default:
  42. return -EINVAL;
  43. }
  44. return 0;
  45. }
/*
 * Generate a read/write debugfs attribute for an rport.  Expands to a get
 * handler, a set handler, and a DEFINE_DEBUGFS_ATTRIBUTE() fops instance
 * named qla_dfs_rport_<_attr>_fops; both handlers forward to the shared
 * qla_dfs_rport_get()/qla_dfs_rport_set() dispatchers with @_attr_id.
 */
#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
{ \
	struct fc_port *fp = data; \
	return qla_dfs_rport_get(fp, _attr_id, val); \
} \
static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
{ \
	struct fc_port *fp = data; \
	return qla_dfs_rport_set(fp, _attr_id, val); \
} \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
		qla_dfs_rport_##_attr##_get, \
		qla_dfs_rport_##_attr##_set, "%llu\n")
/*
 * Wrapper for getting fc_port fields.
 *
 * _attr : Attribute name.
 * _get_val : Accessor macro to retrieve the value.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
{ \
	struct fc_port *fp = data; \
	*val = _get_val; \
	return 0; \
} \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
		qla_dfs_rport_field_##_attr##_get, \
		NULL, "%llu\n")

/* Read-only attribute backed by an arbitrary accessor expression. */
#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

/* Read-only attribute backed directly by the fc_port member of same name. */
#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
/* Writable dev_loss_tmo plus the read-only per-rport debug fields. */
DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);
DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
/* port_id and sess_kref need custom accessors rather than a plain member. */
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
  93. void
  94. qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
  95. {
  96. char wwn[32];
  97. #define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
  98. debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
  99. fp, &qla_dfs_rport_field_##_attr##_fops)
  100. if (!vha->dfs_rport_root || fp->dfs_rport_dir)
  101. return;
  102. sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
  103. fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
  104. if (IS_ERR(fp->dfs_rport_dir))
  105. return;
  106. if (NVME_TARGET(vha->hw, fp))
  107. debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
  108. fp, &qla_dfs_rport_dev_loss_tmo_fops);
  109. QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
  110. QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
  111. QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
  112. QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
  113. QLA_CREATE_RPORT_FIELD_ATTR(flags);
  114. QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
  115. QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
  116. QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
  117. QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
  118. QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
  119. QLA_CREATE_RPORT_FIELD_ATTR(port_id);
  120. QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
  121. }
  122. void
  123. qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
  124. {
  125. if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
  126. return;
  127. debugfs_remove_recursive(fp->dfs_rport_dir);
  128. fp->dfs_rport_dir = NULL;
  129. }
  130. static int
  131. qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
  132. {
  133. scsi_qla_host_t *vha = s->private;
  134. struct qla_hw_data *ha = vha->hw;
  135. unsigned long flags;
  136. struct fc_port *sess = NULL;
  137. struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
  138. seq_printf(s, "%s\n", vha->host_str);
  139. if (tgt) {
  140. seq_puts(s, "Port ID Port Name Handle\n");
  141. spin_lock_irqsave(&ha->tgt.sess_lock, flags);
  142. list_for_each_entry(sess, &vha->vp_fcports, list)
  143. seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
  144. sess->d_id.b.domain, sess->d_id.b.area,
  145. sess->d_id.b.al_pa, sess->port_name,
  146. sess->loop_id);
  147. spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
  148. }
  149. return 0;
  150. }
  151. DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
/*
 * Dump the firmware's port database: fetch the loop-ID list via the GID
 * list mailbox command, then query each entry's port database and print
 * WWPN, port ID and loop ID.
 */
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	fc_port_t fc_port;
	char *id_iter;
	int rc, i;
	uint16_t entries, loop_id;

	seq_printf(s, "%s\n", vha->host_str);
	gid_list = dma_alloc_coherent(&ha->pdev->dev,
				      qla2x00_gid_list_size(ha),
				      &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_user, vha, 0x7018,
		       "DMA allocation failed for %u\n",
		       qla2x00_gid_list_size(ha));
		/* debugfs read reports success with empty output on failure */
		return 0;
	}

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
				  &entries);
	if (rc != QLA_SUCCESS)
		goto out_free_id_list;

	id_iter = (char *)gid_list;

	seq_puts(s, "Port Name Port ID Loop ID\n");

	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid =
			(struct gid_list_info *)id_iter;
		loop_id = le16_to_cpu(gid->loop_id);
		memset(&fc_port, 0, sizeof(fc_port_t));

		fc_port.loop_id = loop_id;

		/*
		 * NOTE(review): rc from qla24xx_gpdb_wait() is ignored; on
		 * failure the printed fields may be zero/stale — confirm
		 * this best-effort behavior is intended.
		 */
		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
		seq_printf(s, "%8phC %02x%02x%02x %d\n",
			   fc_port.port_name, fc_port.d_id.b.domain,
			   fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
			   fc_port.loop_id);
		/* entries are ha->gid_list_info_size bytes apart */
		id_iter += ha->gid_list_info_size;
	}
out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
			  gid_list, gid_list_dma);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
  198. static int
  199. qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
  200. {
  201. struct scsi_qla_host *vha = s->private;
  202. uint16_t mb[MAX_IOCB_MB_REG];
  203. int rc;
  204. struct qla_hw_data *ha = vha->hw;
  205. u16 iocbs_used, i, exch_used;
  206. rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
  207. if (rc != QLA_SUCCESS) {
  208. seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
  209. } else {
  210. seq_puts(s, "FW Resource count\n\n");
  211. seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
  212. seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
  213. seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
  214. seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
  215. seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
  216. seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
  217. seq_printf(s, "MAX VP count[%d]\n", mb[11]);
  218. seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
  219. seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
  220. mb[20]);
  221. seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
  222. mb[21]);
  223. seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
  224. mb[22]);
  225. seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
  226. mb[23]);
  227. }
  228. if (ql2xenforce_iocb_limit) {
  229. /* lock is not require. It's an estimate. */
  230. iocbs_used = ha->base_qpair->fwres.iocbs_used;
  231. exch_used = ha->base_qpair->fwres.exch_used;
  232. for (i = 0; i < ha->max_qpairs; i++) {
  233. if (ha->queue_pair_map[i]) {
  234. iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
  235. exch_used += ha->queue_pair_map[i]->fwres.exch_used;
  236. }
  237. }
  238. seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
  239. iocbs_used, ha->base_qpair->fwres.iocbs_limit);
  240. seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
  241. exch_used, ha->base_qpair->fwres.exch_limit);
  242. if (ql2xenforce_iocb_limit == 2) {
  243. iocbs_used = atomic_read(&ha->fwres.iocb_used);
  244. exch_used = atomic_read(&ha->fwres.exch_used);
  245. seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
  246. iocbs_used, ha->fwres.iocb_limit);
  247. seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
  248. exch_used, ha->fwres.exch_limit);
  249. }
  250. }
  251. return 0;
  252. }
  253. DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
/*
 * Aggregate and display target-mode command counters across the base queue
 * pair and every active queue pair, followed by DIF statistics, initiator
 * error counters, and per-target link-down counts.
 */
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;
	fc_port_t *fcport = NULL;

	/* Counters are meaningless while the chip is down; show nothing. */
	if (qla2x00_chip_is_down(vha))
		return 0;

	/* Seed the totals from the base queue pair... */
	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	/* ...then add every additional queue pair (map may be sparse). */
	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		   qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		   qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		   qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		   core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		   core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		   core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		   num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		   num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		   num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		   vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		   vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		   vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		   vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		   vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		   vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		   vha->qla_stats.qla_dif_stats.dif_app_tag_err);

	seq_puts(s, "\n");
	seq_puts(s, "Initiator Error Counters\n");
	seq_printf(s, "HW Error Count = %14lld\n",
		   vha->hw_err_cnt);
	seq_printf(s, "Link Down Count = %14lld\n",
		   vha->short_link_down_cnt);
	seq_printf(s, "Interface Err Count = %14lld\n",
		   vha->interface_err_cnt);
	seq_printf(s, "Cmd Timeout Count = %14lld\n",
		   vha->cmd_timeout_cnt);
	seq_printf(s, "Reset Count = %14lld\n",
		   vha->reset_cmd_err_cnt);
	seq_puts(s, "\n");

	/* Per-target short link-down counters (only for ports with rports) */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (!fcport->rport)
			continue;

		seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
			   fcport->rport->number, fcport->tgt_short_link_down_cnt);
	}
	seq_puts(s, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
/*
 * Render a snapshot of the Fibre Channel Event (FCE) trace buffer: the
 * write pointer, DMA base, enable registers and the raw trace words.
 * Runs under fce_mutex; tracing was paused by qla2x00_dfs_fce_open().
 */
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	seq_puts(s, "FCE Trace Buffer\n");
	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
	seq_puts(s, "FCE Enable Registers\n");
	/* NOTE(review): fce_mb[1] is skipped here — confirm intentional. */
	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
	    ha->fce_mb[5], ha->fce_mb[6]);

	/* Dump trace words, 8 per line, each line prefixed by DMA address. */
	fce = (uint32_t *) ha->fce;
	fce_start = (unsigned long long) ha->fce_dma;
	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
		if (cnt % 8 == 0)
			seq_printf(s, "\n%llx: ",
			    (unsigned long long)((cnt * 4) + fce_start));
		else
			seq_putc(s, ' ');
		seq_printf(s, "%08x", *fce++);
	}

	seq_puts(s, "\nEnd\n");

	mutex_unlock(&ha->fce_mutex);

	return 0;
}
/*
 * Open handler for the "fce" node.  If tracing is active it is disabled
 * (flushing the buffers) before the seq_file is set up, so the snapshot
 * rendered by qla2x00_dfs_fce_show() stays stable while being read.
 */
static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		       "DebugFS: Unable to disable FCE (%d).\n", rval);

	/* Marked disabled even if the mailbox command failed. */
	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}
/*
 * Release handler for the "fce" node.  Re-arms FCE tracing (disabled by
 * the open handler) with a zeroed buffer; if re-enabling fails, tracing
 * stays off.  Skips the re-enable when tracing is already active.
 */
static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	/* Already enabled: nothing to restore. */
	if (ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		       "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}
/* fops for "fce": open pauses tracing, release resumes it. */
static const struct file_operations dfs_fce_ops = {
	.open = qla2x00_dfs_fce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = qla2x00_dfs_fce_release,
};
  426. static int
  427. qla_dfs_naqp_show(struct seq_file *s, void *unused)
  428. {
  429. struct scsi_qla_host *vha = s->private;
  430. struct qla_hw_data *ha = vha->hw;
  431. seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
  432. return 0;
  433. }
  434. /*
  435. * Helper macros for setting up debugfs entries.
  436. * _name: The name of the debugfs entry
  437. * _ctx_struct: The context that was passed when creating the debugfs file
  438. *
 * QLA_DFS_SETUP_RD can be used when there is only a show function.
 * - the show function takes the name qla_dfs_<sysfs-name>_show
 *
 * QLA_DFS_SETUP_RW can be used when there are both show and write functions.
 * - the show function takes the name qla_dfs_<sysfs-name>_show
 * - the write function takes the name qla_dfs_<sysfs-name>_write
  445. *
  446. * To have a new debugfs entry, do:
  447. * 1. Create a "struct dentry *" in the appropriate structure in the format
  448. * dfs_<sysfs-name>
  449. * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
  450. * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
  451. * or QLA_DFS_ROOT_CREATE_FILE
  452. * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
  453. * or QLA_DFS_ROOT_REMOVE_FILE
  454. *
  455. * Example for creating "TEST" sysfs file:
  456. * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
  457. * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
  458. * 3. In qla2x00_dfs_setup():
  459. * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
  460. * 4. In qla2x00_dfs_remove():
  461. * QLA_DFS_REMOVE_FILE(ha, TEST);
  462. */
/*
 * Generate the open handler and read-only file_operations for a debugfs
 * node whose show function is qla_dfs_<_name>_show.  The inode's private
 * data (of type _ctx_struct *) is passed through to the show function.
 */
#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \
static int \
qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
{ \
	_ctx_struct *__ctx = inode->i_private; \
\
	return single_open(file, qla_dfs_##_name##_show, __ctx); \
} \
\
static const struct file_operations qla_dfs_##_name##_ops = { \
	.open = qla_dfs_##_name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
};
/*
 * Like QLA_DFS_SETUP_RD but also wires in a write handler; the caller must
 * define qla_dfs_<_name>_write before instantiating this macro.
 */
#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \
static int \
qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
{ \
	_ctx_struct *__ctx = inode->i_private; \
\
	return single_open(file, qla_dfs_##_name##_show, __ctx); \
} \
\
static const struct file_operations qla_dfs_##_name##_ops = { \
	.open = qla_dfs_##_name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
	.write = qla_dfs_##_name##_write, \
};
/*
 * Create/remove a debugfs file directly under the driver-wide root
 * directory; the dentry is cached in the file-scope variable
 * qla_dfs_<_name>, and creation is skipped if it already exists.
 */
#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
	do { \
		if (!qla_dfs_##_name) \
			qla_dfs_##_name = debugfs_create_file(#_name, \
					_perm, qla2x00_dfs_root, _ctx, \
					&qla_dfs_##_name##_ops); \
	} while (0)

#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
	do { \
		if (qla_dfs_##_name) { \
			debugfs_remove(qla_dfs_##_name); \
			qla_dfs_##_name = NULL; \
		} \
	} while (0)
  508. #define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \
  509. do { \
  510. (_struct)->dfs_##_name = debugfs_create_file(#_name, \
  511. _perm, _parent, _ctx, \
  512. &qla_dfs_##_name##_ops) \
  513. } while (0)
  514. #define QLA_DFS_REMOVE_FILE(_struct, _name) \
  515. do { \
  516. if ((_struct)->dfs_##_name) { \
  517. debugfs_remove((_struct)->dfs_##_name); \
  518. (_struct)->dfs_##_name = NULL; \
  519. } \
  520. } while (0)
  521. static int
  522. qla_dfs_naqp_open(struct inode *inode, struct file *file)
  523. {
  524. struct scsi_qla_host *vha = inode->i_private;
  525. return single_open(file, qla_dfs_naqp_show, vha);
  526. }
  527. static ssize_t
  528. qla_dfs_naqp_write(struct file *file, const char __user *buffer,
  529. size_t count, loff_t *pos)
  530. {
  531. struct seq_file *s = file->private_data;
  532. struct scsi_qla_host *vha = s->private;
  533. struct qla_hw_data *ha = vha->hw;
  534. char *buf;
  535. int rc = 0;
  536. unsigned long num_act_qp;
  537. if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
  538. pr_err("host%ld: this adapter does not support Multi Q.",
  539. vha->host_no);
  540. return -EINVAL;
  541. }
  542. if (!vha->flags.qpairs_available) {
  543. pr_err("host%ld: Driver is not setup with Multi Q.",
  544. vha->host_no);
  545. return -EINVAL;
  546. }
  547. buf = memdup_user_nul(buffer, count);
  548. if (IS_ERR(buf)) {
  549. pr_err("host%ld: fail to copy user buffer.",
  550. vha->host_no);
  551. return PTR_ERR(buf);
  552. }
  553. num_act_qp = simple_strtoul(buf, NULL, 0);
  554. if (num_act_qp >= vha->hw->max_qpairs) {
  555. pr_err("User set invalid number of qpairs %lu. Max = %d",
  556. num_act_qp, vha->hw->max_qpairs);
  557. rc = -EINVAL;
  558. goto out_free;
  559. }
  560. if (num_act_qp != ha->tgt.num_act_qpairs) {
  561. ha->tgt.num_act_qpairs = num_act_qp;
  562. qlt_clr_qp_table(vha);
  563. }
  564. rc = count;
  565. out_free:
  566. kfree(buf);
  567. return rc;
  568. }
/* fops for "naqp": read current value, write a new queue-pair count. */
static const struct file_operations dfs_naqp_ops = {
	.open = qla_dfs_naqp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = qla_dfs_naqp_write,
};
/*
 * Create the per-adapter debugfs hierarchy:
 *   <debugfs>/qla2xxx/<host>/{fw_resource_count, tgt_counters,
 *   tgt_port_database, fce, tgt_sess, naqp, rports/}
 * Only for adapters with FCE support (25xx and newer) and only when an FCE
 * buffer was allocated.  Always returns 0 — debugfs is best-effort.
 */
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;
	if (!ha->fce)
		goto out;

	/* The driver-wide root directory is created once, on first use. */
	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
	    S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);

	/* naqp is only meaningful on Multi-Q-capable adapters. */
	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
		if (IS_ERR(ha->tgt.dfs_naqp)) {
			ql_log(ql_log_warn, vha, 0xd011,
			    "Unable to create debugFS naqp node.\n");
			goto out;
		}
	}
	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
	if (IS_ERR(vha->dfs_rport_root)) {
		ql_log(ql_log_warn, vha, 0xd012,
		    "Unable to create debugFS rports node.\n");
		goto out;
	}
out:
	return 0;
}
  624. int
  625. qla2x00_dfs_remove(scsi_qla_host_t *vha)
  626. {
  627. struct qla_hw_data *ha = vha->hw;
  628. if (ha->tgt.dfs_naqp) {
  629. debugfs_remove(ha->tgt.dfs_naqp);
  630. ha->tgt.dfs_naqp = NULL;
  631. }
  632. if (ha->tgt.dfs_tgt_sess) {
  633. debugfs_remove(ha->tgt.dfs_tgt_sess);
  634. ha->tgt.dfs_tgt_sess = NULL;
  635. }
  636. if (ha->tgt.dfs_tgt_port_database) {
  637. debugfs_remove(ha->tgt.dfs_tgt_port_database);
  638. ha->tgt.dfs_tgt_port_database = NULL;
  639. }
  640. if (ha->dfs_fw_resource_cnt) {
  641. debugfs_remove(ha->dfs_fw_resource_cnt);
  642. ha->dfs_fw_resource_cnt = NULL;
  643. }
  644. if (ha->dfs_tgt_counters) {
  645. debugfs_remove(ha->dfs_tgt_counters);
  646. ha->dfs_tgt_counters = NULL;
  647. }
  648. if (ha->dfs_fce) {
  649. debugfs_remove(ha->dfs_fce);
  650. ha->dfs_fce = NULL;
  651. }
  652. if (vha->dfs_rport_root) {
  653. debugfs_remove_recursive(vha->dfs_rport_root);
  654. vha->dfs_rport_root = NULL;
  655. }
  656. if (ha->dfs_dir) {
  657. debugfs_remove(ha->dfs_dir);
  658. ha->dfs_dir = NULL;
  659. atomic_dec(&qla2x00_dfs_root_count);
  660. }
  661. if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
  662. qla2x00_dfs_root) {
  663. debugfs_remove(qla2x00_dfs_root);
  664. qla2x00_dfs_root = NULL;
  665. }
  666. return 0;
  667. }