mpi3mr_app.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2022 Broadcom Inc.
 *  (mailto: [email protected])
 *
 */

#include "mpi3mr.h"
#include <linux/bsg-lib.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
/**
 * mpi3mr_bsg_pel_abort - sends PEL abort request
 * @mrioc: Adapter instance reference
 *
 * This function sends a PEL abort request to the firmware through
 * the admin request queue.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_pel_req_action_abort pel_abort_req;
	struct mpi3_pel_reply *pel_reply;
	int retval = 0;
	u16 pe_log_status;

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -1;
	}
	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -1;
	}

	memset(&pel_abort_req, 0, sizeof(pel_abort_req));
	mutex_lock(&mrioc->pel_abort_cmd.mutex);
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->pel_abort_cmd.mutex);
		return -1;
	}
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
	mrioc->pel_abort_cmd.is_waiting = 1;
	mrioc->pel_abort_cmd.callback = NULL;
	pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
	pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);

	mrioc->pel_abort_requested = 1;
	init_completion(&mrioc->pel_abort_cmd.done);
	retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
	    sizeof(pel_abort_req), 0);
	if (retval) {
		retval = -1;
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		mrioc->pel_abort_requested = 0;
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->pel_abort_cmd.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
		if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, (mrioc->pel_abort_cmd.ioc_status &
		    MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->pel_abort_cmd.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
		pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
		pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
		if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
			dprint_bsg_err(mrioc,
			    "%s: command failed, pel_status(0x%04x)\n",
			    __func__, pe_log_status);
			retval = -1;
		}
	}

out_unlock:
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->pel_abort_cmd.mutex);
	return retval;
}
/**
 * mpi3mr_bsg_verify_adapter - verify adapter number is valid
 * @ioc_number: Adapter number
 *
 * This function returns the adapter instance pointer for the given
 * adapter number. If the adapter number does not match any adapter
 * in the driver's adapter list, NULL is returned.
 *
 * Return: adapter instance reference
 */
static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
{
	struct mpi3mr_ioc *mrioc = NULL;

	spin_lock(&mrioc_list_lock);
	list_for_each_entry(mrioc, &mrioc_list, list) {
		if (mrioc->id == ioc_number) {
			spin_unlock(&mrioc_list_lock);
			return mrioc;
		}
	}
	spin_unlock(&mrioc_list_lock);
	return NULL;
}
/**
 * mpi3mr_enable_logdata - Handler for log data enable
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function enables log data caching in the driver if it is
 * not already enabled and returns the maximum number of log data
 * entries that can be cached in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_logdata_enable logdata_enable;

	if (!mrioc->logdata_buf) {
		mrioc->logdata_entry_sz =
		    (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
		    + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
		mrioc->logdata_buf_idx = 0;
		mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
		    mrioc->logdata_entry_sz, GFP_KERNEL);
		if (!mrioc->logdata_buf)
			return -ENOMEM;
	}

	memset(&logdata_enable, 0, sizeof(logdata_enable));
	logdata_enable.max_entries =
	    MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &logdata_enable, sizeof(logdata_enable));
		return 0;
	}
	return -EINVAL;
}
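
/*
 * The logdata_entry_sz computed above is the event data payload that
 * fits in one reply frame: the full reply size minus the event
 * notification reply header (sizeof(struct mpi3_event_notification_reply)
 * less its trailing event_data placeholder, presumably 4 bytes), plus
 * the per-entry header (MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ) that is
 * prepended to each cached entry.
 */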
/**
 * mpi3mr_get_logdata - Handler for get log data
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function copies the log data entries to the user buffer
 * when log caching is enabled in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;

	if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
		return -EINVAL;

	num_entries = job->request_payload.payload_len / entry_sz;
	if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
		num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	sz = num_entries * entry_sz;

	if (job->request_payload.payload_len >= sz) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    mrioc->logdata_buf, sz);
		return 0;
	}
	return -EINVAL;
}
/**
 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver command
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function is the handler for the PEL enable driver command.
 * It validates the application-given class and locale and, if
 * required, aborts the existing PEL wait request and/or issues a
 * new PEL wait request to the firmware and returns.
 *
 * Return: 0 on success and proper error codes on failure.
 */
static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_bsg_out_pel_enable pel_enable;
	u8 issue_pel_wait;
	u8 tmp_class;
	u16 tmp_locale;

	if (job->request_payload.payload_len != sizeof(pel_enable)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &pel_enable, sizeof(pel_enable));

	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
			__func__, pel_enable.pel_class);
		rval = 0;
		goto out;
	}

	if (!mrioc->pel_enabled)
		issue_pel_wait = 1;
	else {
		if ((mrioc->pel_class <= pel_enable.pel_class) &&
		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
		      pel_enable.pel_locale)) {
			issue_pel_wait = 0;
			rval = 0;
		} else {
			pel_enable.pel_locale |= mrioc->pel_locale;
			if (mrioc->pel_class < pel_enable.pel_class)
				pel_enable.pel_class = mrioc->pel_class;
			rval = mpi3mr_bsg_pel_abort(mrioc);
			if (rval) {
				dprint_bsg_err(mrioc,
				    "%s: pel_abort failed, status(%ld)\n",
				    __func__, rval);
				goto out;
			}
			issue_pel_wait = 1;
		}
	}
	if (issue_pel_wait) {
		tmp_class = mrioc->pel_class;
		tmp_locale = mrioc->pel_locale;
		mrioc->pel_class = pel_enable.pel_class;
		mrioc->pel_locale = pel_enable.pel_locale;
		mrioc->pel_enabled = 1;
		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
		if (rval) {
			mrioc->pel_class = tmp_class;
			mrioc->pel_locale = tmp_locale;
			mrioc->pel_enabled = 0;
			dprint_bsg_err(mrioc,
			    "%s: pel get sequence number failed, status(%ld)\n",
			    __func__, rval);
		}
	}

out:
	return rval;
}
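
/*
 * Note on the merge in mpi3mr_bsg_pel_enable(): when PEL is already
 * enabled, a new request widens rather than replaces the filter - the
 * locale bits are OR'ed together and the numerically lower class of
 * the two is kept (the PEL classes appear to be ordered from debug to
 * fault, so the lower value is the more inclusive one), ensuring no
 * previously subscribed events are lost.
 */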
/**
 * mpi3mr_get_all_tgt_info - Get all target information
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the driver-managed target devices' device
 * handle, persistent ID, bus ID and target ID to the user provided
 * buffer for the specific controller. This function also provides
 * the number of devices managed by the driver for the specific
 * controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_devices = 0, i = 0, size;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;

	if (job->request_payload.payload_len < sizeof(u32)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		num_devices++;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if ((job->request_payload.payload_len <= sizeof(u64)) ||
	    list_empty(&mrioc->tgtdev_list)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &num_devices, sizeof(num_devices));
		return 0;
	}

	kern_entrylen = num_devices * sizeof(*devmap_info);
	size = sizeof(u64) + kern_entrylen;
	alltgt_info = kzalloc(size, GFP_KERNEL);
	if (!alltgt_info)
		return -ENOMEM;

	devmap_info = alltgt_info->dmi;
	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (i < num_devices) {
			devmap_info[i].handle = tgtdev->dev_handle;
			devmap_info[i].perst_id = tgtdev->perst_id;
			if (tgtdev->host_exposed && tgtdev->starget) {
				devmap_info[i].target_id = tgtdev->starget->id;
				devmap_info[i].bus_id =
				    tgtdev->starget->channel;
			}
			i++;
		}
	}
	num_devices = i;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	alltgt_info->num_devices = num_devices;

	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
	    sizeof(*devmap_info);
	usr_entrylen *= sizeof(*devmap_info);
	min_entrylen = min(usr_entrylen, kern_entrylen);
	sg_copy_from_buffer(job->request_payload.sg_list,
			    job->request_payload.sg_cnt,
			    alltgt_info, (min_entrylen + sizeof(u64)));
	kfree(alltgt_info);
	return 0;
}
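
/*
 * Layout returned to the application, as implied by the copy sizes
 * above: a u32 device count padded out to sizeof(u64), immediately
 * followed by up to num_devices struct mpi3mr_device_map_info entries;
 * entries that do not fit in the user buffer are silently truncated,
 * and unpopulated fields remain at the 0xFF fill pattern.
 */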
/**
 * mpi3mr_get_change_count - Get topology change count
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the topology change count provided by the
 * firmware in events and cached in the driver to the user
 * provided buffer for the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_change_count chgcnt;

	memset(&chgcnt, 0, sizeof(chgcnt));
	chgcnt.change_count = mrioc->change_count;
	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &chgcnt, sizeof(chgcnt));
		return 0;
	}
	return -EINVAL;
}
/**
 * mpi3mr_bsg_adp_reset - Issue controller reset
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function identifies the user provided reset type, issues the
 * appropriate reset to the controller, waits for it to complete,
 * reinitializes the controller and then returns.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	u8 save_snapdump;
	struct mpi3mr_bsg_adp_reset adpreset;

	if (job->request_payload.payload_len !=
	    sizeof(adpreset)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		goto out;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &adpreset, sizeof(adpreset));

	switch (adpreset.reset_type) {
	case MPI3MR_BSG_ADPRESET_SOFT:
		save_snapdump = 0;
		break;
	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
		save_snapdump = 1;
		break;
	default:
		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
		    __func__, adpreset.reset_type);
		goto out;
	}

	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
	    save_snapdump);

	if (rval)
		dprint_bsg_err(mrioc,
		    "%s: reset handler returned error(%ld) for reset type %d\n",
		    __func__, rval, adpreset.reset_type);
out:
	return rval;
}
/**
 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function provides adapter information for the given
 * controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	enum mpi3mr_iocstate ioc_state;
	struct mpi3mr_bsg_in_adpinfo adpinfo;

	memset(&adpinfo, 0, sizeof(adpinfo));
	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
	adpinfo.pci_dev_id = mrioc->pdev->device;
	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
	adpinfo.pci_bus = mrioc->pdev->bus->number;
	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
	    sizeof(adpinfo.driver_info));

	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &adpinfo, sizeof(adpinfo));
		return 0;
	}
	return -EINVAL;
}
/**
 * mpi3mr_bsg_process_drv_cmds - Driver Command handler
 * @job: BSG job reference
 *
 * This function is the top level handler for driver commands. It
 * performs basic validation of the buffer, identifies the opcode
 * and dispatches to the correct sub-handler.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;

	bsg_req = job->request;
	drvrcmd = &bsg_req->cmd.drvrcmd;

	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
		return rval;
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
		return -ERESTARTSYS;

	switch (drvrcmd->opcode) {
	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
		rval = mpi3mr_bsg_adp_reset(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
		rval = mpi3mr_get_all_tgt_info(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
		rval = mpi3mr_get_change_count(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
		rval = mpi3mr_enable_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
		rval = mpi3mr_get_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
		rval = mpi3mr_bsg_pel_enable(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
	default:
		pr_err("%s: unsupported driver command opcode %d\n",
		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
		break;
	}
	mutex_unlock(&mrioc->bsg_cmds.mutex);
	return rval;
}
/**
 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
 * @mpi_req: MPI request
 * @sgl_offset: offset to start sgl in the MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 * @is_rmc: Does the buffer list have a management command buffer
 * @is_rmr: Does the buffer list have a management response buffer
 * @num_datasges: Number of data buffers in the list
 *
 * This function places the DMA address of the given buffers in
 * proper format as SGEs in the given MPI request.
 *
 * Return: Nothing
 */
static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
	u8 is_rmr, u8 num_datasges)
{
	u8 *sgl = (mpi_req + sgl_offset), count = 0;
	struct mpi3_mgmt_passthrough_request *rmgmt_req =
	    (struct mpi3_mgmt_passthrough_request *)mpi_req;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u8 sgl_flags, sgl_flags_last;

	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;

	if (is_rmc) {
		mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
		    sgl_flags_last, drv_buf_iter->kern_buf_len,
		    drv_buf_iter->kern_buf_dma);
		sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
		drv_buf_iter++;
		count++;
		if (is_rmr) {
			mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
			    sgl_flags_last, drv_buf_iter->kern_buf_len,
			    drv_buf_iter->kern_buf_dma);
			drv_buf_iter++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->response_sgl);
	}
	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return;
	}
	for (; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if (num_datasges == 1 || !is_rmc)
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
			    drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
		else
			mpi3mr_add_sg_single(sgl, sgl_flags,
			    drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
		sgl += sizeof(struct mpi3_sge_common);
		num_datasges--;
	}
}
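
/*
 * Note: in the management command (is_rmc) case above, the data SGEs
 * are not placed in the MPI request frame itself - they are appended
 * inside the management command buffer, immediately after the
 * application supplied payload (kern_buf + bsg_buf_len). The caller is
 * expected to have allocated that buffer with extra room for these
 * SGEs, as mpi3mr_bsg_process_mpt_cmds() does.
 */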
/**
 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
 * @nvme_encap_request: NVMe encapsulated MPI request
 *
 * This function returns the type of the data format specified in
 * the user provided NVMe command in the NVMe encapsulated request.
 *
 * Return: Data format of the NVMe command (PRP/SGL etc)
 */
static unsigned int mpi3mr_get_nvme_data_fmt(
	struct mpi3_nvme_encapsulated_request *nvme_encap_request)
{
	u8 format = 0;

	format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
	return format;
}
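
/*
 * The format above comes from bits 15:14 of NVMe command dword 0, the
 * PSDT field: 00b selects PRPs while 01b/10b select SGLs, which is why
 * the caller only distinguishes MPI3MR_NVME_DATA_FORMAT_PRP from the
 * two SGL variants when building the data buffer description.
 */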
/**
 * mpi3mr_build_nvme_sgl - SGL constructor for NVME
 * encapsulated request
 * @mrioc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 *
 * This function places the DMA address of the given buffers in
 * proper format as SGEs in the given NVMe encapsulated request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
	struct mpi3mr_nvme_pt_sge *nvme_sgl;
	u64 sgl_ptr;
	u8 count;
	size_t length = 0;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
	    mrioc->facts.sge_mod_shift) << 32);
	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
	    mrioc->facts.sge_mod_shift) << 32;

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any sgl.
	 */
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
		length = drv_buf_iter->kern_buf_len;
		break;
	}
	if (!length)
		return 0;

	if (sgl_ptr & sgemod_mask) {
		dprint_bsg_err(mrioc,
		    "%s: SGL address collides with SGE modifier\n",
		    __func__);
		return -1;
	}

	sgl_ptr &= ~sgemod_mask;
	sgl_ptr |= sgemod_val;
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
	    ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
	nvme_sgl->base_addr = sgl_ptr;
	nvme_sgl->length = length;
	return 0;
}
/**
 * mpi3mr_build_nvme_prp - PRP constructor for NVME
 * encapsulated request
 * @mrioc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @drv_bufs: DMA address of the buffers to be placed in SGL
 * @bufcnt: Number of DMA buffers
 *
 * This function places the DMA address of the given buffers in
 * proper format as PRP entries in the given NVMe encapsulated
 * request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
	int prp_size = MPI3MR_NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len, dev_pgsz;
	u32 page_mask_result, page_mask;
	size_t length = 0;
	u8 count;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
	    mrioc->facts.sge_mod_shift) << 32);
	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
	    mrioc->facts.sge_mod_shift) << 32;
	u16 dev_handle = nvme_encap_request->dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
			__func__, dev_handle);
		return -1;
	}

	if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
		dprint_bsg_err(mrioc,
		    "%s: NVMe device page size is zero for handle 0x%04x\n",
		    __func__, dev_handle);
		mpi3mr_tgtdev_put(tgtdev);
		return -1;
	}

	dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
	mpi3mr_tgtdev_put(tgtdev);

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		dma_addr = drv_buf_iter->kern_buf_dma;
		length = drv_buf_iter->kern_buf_len;
		break;
	}
	if (!length)
		return 0;

	mrioc->prp_sz = 0;
	mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
	    dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
	if (!mrioc->prp_list_virt)
		return -1;
	mrioc->prp_sz = dev_pgsz;

	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
	    MPI3MR_NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
	    MPI3MR_NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mrioc->prp_list_virt;
	prp_page_dma = mrioc->prp_list_dma;

	/*
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = dev_pgsz - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
		    __func__);
		goto err_out;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Loop while the length is not zero. */
	while (length) {
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result && (length > dev_pgsz)) {
			dprint_bsg_err(mrioc,
			    "%s: single PRP page is not sufficient\n",
			    __func__);
			goto err_out;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = dev_pgsz - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);
			if (*prp1_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP1 address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp1_entry &= ~sgemod_mask;
			*prp1_entry |= sgemod_val;

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer? If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > dev_pgsz) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);
				if (*prp2_entry & sgemod_mask) {
					dprint_bsg_err(mrioc,
					    "%s: PRP list address collides with SGE modifier\n",
					    __func__);
					goto err_out;
				}
				*prp2_entry &= ~sgemod_mask;
				*prp2_entry |= sgemod_val;

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
				continue;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
				if (*prp2_entry & sgemod_mask) {
					dprint_bsg_err(mrioc,
					    "%s: PRP2 collides with SGE modifier\n",
					    __func__);
					goto err_out;
				}
				*prp2_entry &= ~sgemod_mask;
				*prp2_entry |= sgemod_val;
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			if (*prp_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp_entry &= ~sgemod_mask;
			*prp_entry |= sgemod_val;
			prp_entry++;
			prp_entry_dma += prp_size;
		}

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;

		/* Decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
	return 0;
err_out:
	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}
	return -1;
}
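
/*
 * Worked example for the loop above (illustrative numbers only): with
 * dev_pgsz = 4096, dma_addr = 0x10000 and length = 12288, the first
 * pass fills PRP1 = 0x10000 (entry_len = 4096); 8192 bytes then
 * remain, which exceeds dev_pgsz, so PRP2 becomes a pointer to the PRP
 * list page and the list's first two entries are filled with 0x11000
 * and 0x12000. The SGE modifier bits are folded into every entry as
 * shown.
 */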
/**
 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
 * @job: BSG job reference
 * @reply_payload_rcv_len: Length of the reply payload copied to the user
 *
 * This function is the top level handler for MPI pass through
 * commands. It does basic validation of the input data buffers,
 * identifies the given buffer types and the MPI command, allocates
 * DMA-able memory for the user given buffers, constructs the SGL
 * properly and passes the command to the firmware.
 *
 * Once the MPI command is completed, the driver copies the data,
 * if any, along with the reply and sense information to the user
 * provided buffers. If the command times out, a controller reset
 * is issued prior to returning.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	u8 *mpi_req = NULL, *sense_buff_k = NULL;
	u8 mpi_msg_size = 0;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_mptcmd *karg;
	struct mpi3mr_buf_entry *buf_entries = NULL;
	struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
	u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
	u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
	u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
	struct mpi3_request_header *mpi_header = NULL;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_scsi_task_mgmt_request *tm_req;
	u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
	u16 dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
	struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
	u32 din_size = 0, dout_size = 0;
	u8 *din_buf = NULL, *dout_buf = NULL;
	u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;

	bsg_req = job->request;
	karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;

	mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;

	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
	if (!mpi_req)
		return -ENOMEM;
	mpi_header = (struct mpi3_request_header *)mpi_req;

	bufcnt = karg->buf_entry_list.num_of_entries;
	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
	if (!drv_bufs) {
		rval = -ENOMEM;
		goto out;
	}

	dout_buf = kzalloc(job->request_payload.payload_len,
	    GFP_KERNEL);
	if (!dout_buf) {
		rval = -ENOMEM;
		goto out;
	}

	din_buf = kzalloc(job->reply_payload.payload_len,
	    GFP_KERNEL);
	if (!din_buf) {
		rval = -ENOMEM;
		goto out;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  dout_buf, job->request_payload.payload_len);

	buf_entries = karg->buf_entry_list.buf_entry;
	sgl_din_iter = din_buf;
	sgl_dout_iter = dout_buf;
	drv_buf_iter = drv_bufs;

	for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {

		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}
		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}

		switch (buf_entries->buf_type) {
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			is_rmcb = 1;
			if (count != 0)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			is_rmrb = 1;
			if (count != 1 || !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_IN:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			din_cnt++;
			din_size += drv_buf_iter->bsg_buf_len;
			if ((din_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_OUT:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			dout_cnt++;
			dout_size += drv_buf_iter->bsg_buf_len;
			if ((dout_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpirep_offset = count;
			break;
		case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			erb_offset = count;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpi_msg_size = buf_entries->buf_len;
			if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
			    (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
				    __func__);
				rval = -EINVAL;
				goto out;
			}
			memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
			break;
		default:
			invalid_be = 1;
			break;
		}
		if (invalid_be) {
			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}

		drv_buf_iter->bsg_buf = sgl_iter;
		drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
	}
	if (!is_rmcb && (dout_cnt || din_cnt)) {
		sg_entries = dout_cnt + din_cnt;
		if (((mpi_msg_size) + (sg_entries *
		    sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
			dprint_bsg_err(mrioc,
			    "%s:%d: invalid message size passed\n",
			    __func__, __LINE__);
			rval = -EINVAL;
			goto out;
		}
	}
	if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
		    __func__, __LINE__, mpi_header->function, din_size);
		rval = -EINVAL;
		goto out;
	}
	if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
		    __func__, __LINE__, mpi_header->function, dout_size);
		rval = -EINVAL;
		goto out;
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;

		drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
		if (is_rmcb && !count)
			drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
			    sizeof(struct mpi3_sge_common));

		if (!drv_buf_iter->kern_buf_len)
			continue;

		drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
		    drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
		    GFP_KERNEL);
		if (!drv_buf_iter->kern_buf) {
			rval = -ENOMEM;
			goto out;
		}
		if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
			tmplen = min(drv_buf_iter->kern_buf_len,
			    drv_buf_iter->bsg_buf_len);
			memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
		}
	}

	if (erb_offset != 0xFF) {
		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
		if (!sense_buff_k) {
			rval = -ENOMEM;
			goto out;
		}
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
		rval = -ERESTARTSYS;
		goto out;
	}
	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
		rval = -EAGAIN;
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		rval = -EFAULT;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
		nvme_fmt = mpi3mr_get_nvme_data_fmt(
			(struct mpi3_nvme_encapsulated_request *)mpi_req);
		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
			if (mpi3mr_build_nvme_prp(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -ENOMEM;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
			nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
			if (mpi3mr_build_nvme_sgl(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -EINVAL;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else {
			dprint_bsg_err(mrioc,
			    "%s: invalid NVMe command format\n", __func__);
			rval = -EINVAL;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	} else {
		mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
		    drv_bufs, bufcnt, is_rmcb, is_rmrb,
		    (dout_cnt + din_cnt));
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
		tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
		if (tm_req->task_type !=
		    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			dev_handle = tm_req->dev_handle;
			block_io = 1;
		}
	}
	if (block_io) {
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
			stgt_priv = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
			atomic_inc(&stgt_priv->block_io);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->bsg_cmds.is_waiting = 1;
	mrioc->bsg_cmds.callback = NULL;
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = sense_buff_k;
	memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
	mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
	if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
		dprint_bsg_info(mrioc,
		    "%s: posting bsg request to the controller\n", __func__);
		dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
		    "bsg_mpi3_req");
		if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
			drv_buf_iter = &drv_bufs[0];
			dprint_dump(drv_buf_iter->kern_buf,
			    drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
		}
	}

	init_completion(&mrioc->bsg_cmds.done);
	rval = mpi3mr_admin_request_post(mrioc, mpi_req,
	    MPI3MR_ADMIN_REQ_FRAME_SZ, 0);

	if (rval) {
		mrioc->bsg_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc,
		    "%s: posting bsg request failed\n", __func__);
		rval = -EAGAIN;
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->bsg_cmds.done,
	    (karg->timeout * HZ));
	if (block_io && stgt_priv)
		atomic_dec(&stgt_priv->block_io);
	if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->bsg_cmds.is_waiting = 0;
		rval = -EAGAIN;
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
			goto out_unlock;
		dprint_bsg_err(mrioc,
		    "%s: bsg request timed out after %d seconds\n", __func__,
		    karg->timeout);
		if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
			dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
			    "bsg_mpi3_req");
			if (mpi_header->function ==
			    MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
				drv_buf_iter = &drv_bufs[0];
				dprint_dump(drv_buf_iter->kern_buf,
				    drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
			}
		}
		if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
		    (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
			mpi3mr_issue_tm(mrioc,
			    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
			    mpi_header->function_dependent, 0,
			    MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
			    &mrioc->host_tm_cmds, &resp_code, NULL);
		if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
		    !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);

	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}

	if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_info(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__,
		    (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->bsg_cmds.ioc_loginfo);
	}

	if ((mpirep_offset != 0xFF) &&
	    drv_bufs[mpirep_offset].bsg_buf_len) {
		drv_buf_iter = &drv_bufs[mpirep_offset];
		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
		    mrioc->reply_sz);
		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);

		if (!bsg_reply_buf) {
			rval = -ENOMEM;
			goto out_unlock;
		}
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
			memcpy(bsg_reply_buf->reply_buf,
			    mrioc->bsg_cmds.reply, mrioc->reply_sz);
		} else {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
			status_desc = (struct mpi3_status_reply_descriptor *)
			    bsg_reply_buf->reply_buf;
			status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
			status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
		}
		tmplen = min(drv_buf_iter->kern_buf_len,
			drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
	}

	if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
	    mrioc->bsg_cmds.is_sense) {
		drv_buf_iter = &drv_bufs[erb_offset];
		tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
			tmplen = min(drv_buf_iter->kern_buf_len,
				     drv_buf_iter->bsg_buf_len);
			memcpy(drv_buf_iter->bsg_buf,
			       drv_buf_iter->kern_buf, tmplen);
		}
	}

out_unlock:
	if (din_buf) {
		*reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    din_buf, job->reply_payload.payload_len);
	}
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = NULL;
	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->bsg_cmds.mutex);
out:
	kfree(sense_buff_k);
	kfree(dout_buf);
	kfree(din_buf);
	kfree(mpi_req);
	if (drv_bufs) {
		drv_buf_iter = drv_bufs;
		for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
			if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
				dma_free_coherent(&mrioc->pdev->dev,
				    drv_buf_iter->kern_buf_len,
				    drv_buf_iter->kern_buf,
				    drv_buf_iter->kern_buf_dma);
		}
		kfree(drv_bufs);
	}
	kfree(bsg_reply_buf);
	return rval;
}
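
/*
 * Buffer entry rules enforced by the validation loop in
 * mpi3mr_bsg_process_mpt_cmds() above: a RAIDMGMT_CMD buffer, when
 * present, must be the first entry and a RAIDMGMT_RESP buffer the
 * second; without a RAIDMGMT_CMD buffer at most one DATA_IN and one
 * DATA_OUT buffer are accepted, and their SGEs must fit in the admin
 * request frame alongside the MPI request; the MPI_REQUEST buffer must
 * be non-empty, a multiple of 4 bytes and no larger than
 * MPI3MR_ADMIN_REQ_FRAME_SZ.
 */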
/**
 * mpi3mr_app_save_logdata - Save Log Data events
 * @mrioc: Adapter instance reference
 * @event_data: event data associated with log data event
 * @event_data_size: event data size to copy
 *
 * If log data event caching is enabled by the applications, this
 * function saves the log data in the circular queue and sends the
 * async signal SIGIO to indicate that there is an async event from
 * the firmware to the event monitoring applications.
 *
 * Return: Nothing
 */
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
	u16 event_data_size)
{
	u32 index = mrioc->logdata_buf_idx, sz;
	struct mpi3mr_logdata_entry *entry;

	if (!(mrioc->logdata_buf))
		return;

	entry = (struct mpi3mr_logdata_entry *)
		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
	entry->valid_entry = 1;
	sz = min(mrioc->logdata_entry_sz, event_data_size);
	memcpy(entry->data, event_data, sz);
	mrioc->logdata_buf_idx =
		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
	atomic64_inc(&event_counter);
}
/**
 * mpi3mr_bsg_request - bsg request entry point
 * @job: BSG job reference
 *
 * This is the driver's entry point for bsg requests.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_bsg_request(struct bsg_job *job)
{
	long rval = -EINVAL;
	unsigned int reply_payload_rcv_len = 0;

	struct mpi3mr_bsg_packet *bsg_req = job->request;

	switch (bsg_req->cmd_type) {
	case MPI3MR_DRV_CMD:
		rval = mpi3mr_bsg_process_drv_cmds(job);
		break;
	case MPI3MR_MPT_CMD:
		rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
		break;
	default:
		pr_err("%s: unsupported BSG command(0x%08x)\n",
		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
		break;
	}

	bsg_job_done(job, rval, reply_payload_rcv_len);

	return 0;
}
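
/*
 * Example (userspace, illustrative sketch only - not part of the
 * driver): issuing the GETCHGCNT driver command through the bsg node
 * registered below, using the SG_IO v4 interface from <linux/bsg.h>
 * and the packet layout from <scsi/scsi_bsg_mpi3mr.h>. Error handling
 * is omitted, the node name assumes controller id 0, and note that
 * driver commands return their reply in the request (data-out)
 * payload, which bsg maps from the caller's buffer:
 *
 *	struct mpi3mr_bsg_packet pkt = { 0 };
 *	struct mpi3mr_change_count chgcnt = { 0 };
 *	struct sg_io_v4 sgio = { 0 };
 *	int fd = open("/dev/bsg/mpi3mrctl0", O_RDWR);
 *
 *	pkt.cmd_type = MPI3MR_DRV_CMD;
 *	pkt.cmd.drvrcmd.mrioc_id = 0;
 *	pkt.cmd.drvrcmd.opcode = MPI3MR_DRVBSG_OPCODE_GETCHGCNT;
 *
 *	sgio.guard = 'Q';
 *	sgio.protocol = BSG_PROTOCOL_SCSI;
 *	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	sgio.request = (__u64)(uintptr_t)&pkt;
 *	sgio.request_len = sizeof(pkt);
 *	sgio.dout_xferp = (__u64)(uintptr_t)&chgcnt;
 *	sgio.dout_xfer_len = sizeof(chgcnt);
 *	ioctl(fd, SG_IO, &sgio);
 */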
  1349. /**
  1350. * mpi3mr_bsg_exit - de-registration from bsg layer
  1351. *
  1352. * This will be called during driver unload and all
  1353. * bsg resources allocated during load will be freed.
  1354. *
  1355. * Return:Nothing
  1356. */
  1357. void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
  1358. {
  1359. struct device *bsg_dev = &mrioc->bsg_dev;
  1360. if (!mrioc->bsg_queue)
  1361. return;
  1362. bsg_remove_queue(mrioc->bsg_queue);
  1363. mrioc->bsg_queue = NULL;
	device_del(bsg_dev);
	put_device(bsg_dev);
}

/**
 * mpi3mr_bsg_node_release - release bsg device node
 * @dev: bsg device node
 *
 * Decrements the reference count of the bsg device node's parent
 *
 * Return: Nothing
 */
static void mpi3mr_bsg_node_release(struct device *dev)
{
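	/* Balances the get_device(parent) taken in mpi3mr_bsg_init() */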
	put_device(dev->parent);
}

/**
 * mpi3mr_bsg_init - registration with bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver load and it will
 * register the driver with the bsg layer
 *
 * Return: Nothing
 */
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;
	struct device *parent = &mrioc->shost->shost_gendev;

	device_initialize(bsg_dev);

	bsg_dev->parent = get_device(parent);
	bsg_dev->release = mpi3mr_bsg_node_release;

	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
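	/*
	 * Even when device_add() fails, put_device() is still required to
	 * drop the reference taken by device_initialize() and to invoke
	 * the release callback.
	 */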
	if (device_add(bsg_dev)) {
		ioc_err(mrioc, "%s: bsg device add failed\n",
		    dev_name(bsg_dev));
		put_device(bsg_dev);
		return;
	}

	mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
			mpi3mr_bsg_request, NULL, 0);
	if (IS_ERR(mrioc->bsg_queue)) {
		ioc_err(mrioc, "%s: bsg registration failed\n",
		    dev_name(bsg_dev));
		device_del(bsg_dev);
		put_device(bsg_dev);
		return;
	}
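	/*
	 * Cap the scatter-gather segment count and the per-request
	 * transfer size that application pass-through requests may use
	 * on this queue.
	 */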
	blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
	blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
}

/**
 * version_fw_show - SysFS callback for firmware version read
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware version
 */
static ssize_t
version_fw_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;

	return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
}
static DEVICE_ATTR_RO(version_fw);

/**
 * fw_queue_depth_show - SysFS callback for firmware max cmds
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware max commands
 */
static ssize_t
fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
}
static DEVICE_ATTR_RO(fw_queue_depth);

/**
 * op_req_q_count_show - SysFS callback for request queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying request queue count
 */
static ssize_t
op_req_q_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
}
static DEVICE_ATTR_RO(op_req_q_count);

/**
 * reply_queue_count_show - SysFS callback for reply queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying reply queue count
 */
static ssize_t
reply_queue_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
}
static DEVICE_ATTR_RO(reply_queue_count);

/**
 * logging_level_show - Show controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * A sysfs 'read/write' shost attribute, to show the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: sysfs_emit() return
 */
static ssize_t
logging_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
}

/**
 * logging_level_store - Change controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 * @count: size of the buffer
 *
 * A sysfs 'read/write' shost attribute, to change the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: strlen() return on success, -EINVAL on failure
 */
static ssize_t
logging_level_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int val = 0;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	mrioc->logging_level = val;
	ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
	return strlen(buf);
}
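/*
 * Usage sketch (the host number is illustrative):
 *   echo 0x4 > /sys/class/scsi_host/host0/logging_level
 *   cat /sys/class/scsi_host/host0/logging_level
 * kstrtoint() with base 0 accepts decimal, 0x-prefixed hex and
 * 0-prefixed octal input.
 */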
static DEVICE_ATTR_RW(logging_level);

/**
 * adp_state_show() - SysFS callback for adapter state show
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying adapter state
 */
static ssize_t
adp_state_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	enum mpi3mr_iocstate ioc_state;
	uint8_t adp_state;
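	/*
	 * Map the internal IOC state onto the BSG adapter states in
	 * decreasing order of severity: unrecoverable takes precedence
	 * over an in-progress reset, which takes precedence over fault.
	 */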
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
		adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	return sysfs_emit(buf, "%u\n", adp_state);
}
static DEVICE_ATTR_RO(adp_state);

static struct attribute *mpi3mr_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_op_req_q_count.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_adp_state.attr,
	NULL,
};

static const struct attribute_group mpi3mr_host_attr_group = {
	.attrs = mpi3mr_host_attrs
};

const struct attribute_group *mpi3mr_host_groups[] = {
	&mpi3mr_host_attr_group,
	NULL,
};
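/*
 * Presumably referenced via the .shost_groups member of the driver's
 * scsi_host_template so the attributes above appear under
 * /sys/class/scsi_host/hostN/ (the hookup lives outside this file).
 */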

/*
 * SCSI Device attributes under sysfs
 */

/**
 * sas_address_show - SysFS callback for dev SAS address display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying SAS address of the
 * specific SAS/SATA end device.
 */
static ssize_t
sas_address_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;
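	/*
	 * Each link in the private-data chain may be absent (for example
	 * while the device is being torn down), and the attribute only
	 * applies to SAS/SATA end devices; all other cases read back as
	 * an empty file.
	 */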
	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;

	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
		return 0;

	return sysfs_emit(buf, "0x%016llx\n",
	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
}
static DEVICE_ATTR_RO(sas_address);

/**
 * device_handle_show - SysFS callback for device handle display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware internal
 * device handle of the specific device.
 */
static ssize_t
device_handle_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;

	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;

	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
}
static DEVICE_ATTR_RO(device_handle);

/**
 * persistent_id_show - SysFS callback for persistent ID display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying persistent ID of the
 * specific device.
 */
static ssize_t
persistent_id_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;

	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;

	return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
}
static DEVICE_ATTR_RO(persistent_id);

static struct attribute *mpi3mr_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_device_handle.attr,
	&dev_attr_persistent_id.attr,
	NULL,
};

static const struct attribute_group mpi3mr_dev_attr_group = {
	.attrs = mpi3mr_dev_attrs
};

const struct attribute_group *mpi3mr_dev_groups[] = {
	&mpi3mr_dev_attr_group,
	NULL,
};
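
/*
 * Presumably referenced via the .sdev_groups member of the driver's
 * scsi_host_template so the attributes above appear under each SCSI
 * device's sysfs directory (the hookup lives outside this file).
 */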