qla_bsg.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
}
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
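		/*
		 * A remapped job carried its payloads in bounce buffers
		 * taken from the purex DMA pool rather than in DMA-mapped
		 * scatter-gather lists, so release the pool buffers here
		 * instead of unmapping.
		 */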
		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
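	/*
	 * flag == 1 requests a full validation pass over the priority
	 * entries; for any other value the "HQOS" header check above is
	 * sufficient.
	 */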
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;

	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint32_t els_cmd = 0;
	int qla_port_allocated = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport) {
			rval = -ENOMEM;
			goto done;
		}
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Port %06X is not online for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}
		qla_port_allocated = 1;

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_request->rqst_data.h_els.port_id[2];
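		/*
		 * AL_PA 0xFD marks the fabric controller well-known
		 * address (FFFFFDh); everything else is addressed via
		 * the F_Port handle.
		 */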
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (qla_port_allocated)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}
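/*
 * A CT pass-through command IOCB carries two data segment descriptors;
 * each continuation IOCB carries five more. Size the IOCB count
 * accordingly, rounding the continuation count up.
 */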
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
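	/*
	 * The top byte of preamble_word1 selects the destination
	 * well-known address: 0xFC is the directory/name server
	 * (FFFFFCh), 0xFA the management server (FFFFFAh).
	 */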
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
			(LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
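	/*
	 * Firmware may ask for more time via idc_extend_tmo; keep
	 * re-arming the wait with the extended timeout until the
	 * completion fires or no further extension is requested.
	 */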
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	     ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	      get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	      req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	      elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
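	/*
	 * The BSG reply is followed by the raw mailbox response registers
	 * and then a single byte recording which diagnostic command (echo
	 * or loopback) was issued.
	 */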
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
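	/* The firmware version is read from the third 32-bit word of the image */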
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
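	/*
	 * The vendor-specific management payload immediately follows the
	 * fc_bsg_request header. dma_direction records which payload gets
	 * mapped below so the unwind path unmaps the right one.
	 */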
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}
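	/* Match the requested WWPN against known target-mode fcports */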
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}
  1261. ha->optrom_region_size = start +
  1262. bsg_job->request_payload.payload_len > ha->optrom_size ?
  1263. ha->optrom_size - start :
  1264. bsg_job->request_payload.payload_len;
  1265. ha->optrom_state = QLA_SWRITING;
  1266. } else {
  1267. ha->optrom_region_size = start +
  1268. bsg_job->reply_payload.payload_len > ha->optrom_size ?
  1269. ha->optrom_size - start :
  1270. bsg_job->reply_payload.payload_len;
  1271. ha->optrom_state = QLA_SREADING;
  1272. }
  1273. ha->optrom_buffer = vzalloc(ha->optrom_region_size);
  1274. if (!ha->optrom_buffer) {
  1275. ql_log(ql_log_warn, vha, 0x7059,
  1276. "Read: Unable to allocate memory for optrom retrieval "
  1277. "(%x)\n", ha->optrom_region_size);
  1278. ha->optrom_state = QLA_SWAITING;
  1279. return -ENOMEM;
  1280. }
  1281. return 0;
  1282. }
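
/*
 * BSG vendor command QL_VND_READ_FLASH: read a region of the adapter's
 * option ROM into the caller's reply payload.
 */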
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;

        if (ha->flags.nic_core_reset_hdlr_active)
                return -EBUSY;

        mutex_lock(&ha->optrom_mutex);
        rval = qla2x00_optrom_setup(bsg_job, vha, 0);
        if (rval) {
                mutex_unlock(&ha->optrom_mutex);
                return rval;
        }

        ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);

        bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
        bsg_reply->result = DID_OK;
        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;
        ha->optrom_state = QLA_SWAITING;
        mutex_unlock(&ha->optrom_mutex);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return rval;
}
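
/*
 * BSG vendor command QL_VND_UPDATE_FLASH: write the request payload to a
 * region of the adapter's option ROM.
 */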
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;

        mutex_lock(&ha->optrom_mutex);
        rval = qla2x00_optrom_setup(bsg_job, vha, 1);
        if (rval) {
                mutex_unlock(&ha->optrom_mutex);
                return rval;
        }

        /* Set isp82xx_no_md_cap so that no minidump is captured */
        ha->flags.isp82xx_no_md_cap = 1;

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);

        rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

        if (rval) {
                bsg_reply->result = -EINVAL;
                rval = -EINVAL;
        } else {
                bsg_reply->result = DID_OK;
        }
        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;
        ha->optrom_state = QLA_SWAITING;
        mutex_unlock(&ha->optrom_mutex);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return rval;
}
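
/*
 * BSG vendor command QL_VND_SET_FRU_VERSION: write each image-version
 * record supplied by the caller to its FRU field address via the SFP
 * write mailbox interface.
 */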
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_image_version_list *list = (void *)bsg;
        struct qla_image_version *image;
        uint32_t count;
        dma_addr_t sfp_dma;
        void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

        image = list->version;
        count = list->count;

        while (count--) {
                memcpy(sfp, &image->field_info, sizeof(image->field_info));
                rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
                    image->field_address.device, image->field_address.offset,
                    sizeof(image->field_info), image->field_address.option);
                if (rval) {
                        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                            EXT_STATUS_MAILBOX;
                        goto dealloc;
                }
                image++;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
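
/*
 * BSG vendor command QL_VND_READ_FRU_STATUS: read a single FRU status
 * register through the SFP read mailbox interface and return it in the
 * reply payload.
 */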
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
            sr->field_address.device, sr->field_address.offset,
            sizeof(sr->status_reg), sr->field_address.option);
        sr->status_reg = *sfp;

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = sizeof(*sr);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

        *sfp = sr->status_reg;
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            sr->field_address.device, sr->field_address.offset,
            sizeof(sr->status_reg), sr->field_address.option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
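
/*
 * BSG vendor command QL_VND_WRITE_I2C: write a caller-supplied buffer to
 * an I2C device behind the adapter via the SFP write mailbox interface.
 */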
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

        memcpy(sfp, i2c->buffer, i2c->length);
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            i2c->device, i2c->offset, i2c->length, i2c->option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
            i2c->device, i2c->offset, i2c->length, i2c->option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        memcpy(i2c->buffer, sfp, i2c->length);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
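
/*
 * BSG vendor command QL_VND_DIAG_IO_CMD: issue a bidirectional
 * diagnostic I/O pass-through. Requires a bidi-capable adapter operating
 * in P2P mode behind a switch; performs a one-time self-login, maps both
 * payloads for DMA and fires an SRB that completes from the interrupt
 * handler. Errors are reported through the vendor response word while
 * the BSG result itself stays DID_OK.
 */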
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint32_t rval = EXT_STATUS_OK;
        uint16_t req_sg_cnt = 0;
        uint16_t rsp_sg_cnt = 0;
        uint16_t nextlid = 0;
        uint32_t tot_dsds;
        srb_t *sp = NULL;
        uint32_t req_data_len;
        uint32_t rsp_data_len;

        /* Check the type of the adapter */
        if (!IS_BIDI_CAPABLE(ha)) {
                ql_log(ql_log_warn, vha, 0x70a0,
                    "This adapter is not supported\n");
                rval = EXT_STATUS_NOT_SUPPORTED;
                goto done;
        }

        if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
            test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
                rval = EXT_STATUS_BUSY;
                goto done;
        }

        /* Check if host is online */
        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70a1,
                    "Host is not online\n");
                rval = EXT_STATUS_DEVICE_OFFLINE;
                goto done;
        }

        /* Check if cable is plugged in or not */
        if (vha->device_flags & DFLG_NO_CABLE) {
                ql_log(ql_log_warn, vha, 0x70a2,
                    "Cable is unplugged...\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        /* Check if the switch is connected or not */
        if (ha->current_topology != ISP_CFG_F) {
                ql_log(ql_log_warn, vha, 0x70a3,
                    "Host is not connected to the switch\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        /* Check if operating mode is P2P */
        if (ha->operating_mode != P2P) {
                ql_log(ql_log_warn, vha, 0x70a4,
                    "Host operating mode is not P2P\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        mutex_lock(&ha->selflogin_lock);
        if (vha->self_login_loop_id == 0) {
                /* Initialize all required fields of fcport */
                vha->bidir_fcport.vha = vha;
                vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
                vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
                vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
                vha->bidir_fcport.loop_id = vha->loop_id;

                if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
                        ql_log(ql_log_warn, vha, 0x70a7,
                            "Failed to login port %06X for bidirectional IOCB\n",
                            vha->bidir_fcport.d_id.b24);
                        mutex_unlock(&ha->selflogin_lock);
                        rval = EXT_STATUS_MAILBOX;
                        goto done;
                }
                vha->self_login_loop_id = nextlid - 1;
        }
        /* Assign the self login loop id to fcport */
        mutex_unlock(&ha->selflogin_lock);

        vha->bidir_fcport.loop_id = vha->self_login_loop_id;

        req_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt,
            DMA_TO_DEVICE);

        if (!req_sg_cnt) {
                rval = EXT_STATUS_NO_MEMORY;
                goto done;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
            DMA_FROM_DEVICE);

        if (!rsp_sg_cnt) {
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_req_sg;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_dbg(ql_dbg_user, vha, 0x70a9,
                    "DMA mapping resulted in different sg counts "
                    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
                    "%x dma_reply_sg_cnt: %x]\n",
                    bsg_job->request_payload.sg_cnt, req_sg_cnt,
                    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_sg;
        }

        req_data_len = bsg_job->request_payload.payload_len;
        rsp_data_len = bsg_job->reply_payload.payload_len;

        if (req_data_len != rsp_data_len) {
                rval = EXT_STATUS_BUSY;
                ql_log(ql_log_warn, vha, 0x70aa,
                    "req_data_len != rsp_data_len\n");
                goto done_unmap_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
        if (!sp) {
                ql_dbg(ql_dbg_user, vha, 0x70ac,
                    "Alloc SRB structure failed\n");
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_sg;
        }

        /* Populate srb->ctx with bidir ctx */
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->type = SRB_BIDI_CMD;
        sp->done = qla2x00_bsg_job_done;

        /* Add the read and write sg count */
        tot_dsds = rsp_sg_cnt + req_sg_cnt;

        rval = qla2x00_start_bidir(sp, vha, tot_dsds);
        if (rval != EXT_STATUS_OK)
                goto done_free_srb;
        /* the bsg request will be completed in the interrupt handler */
        return rval;

done_free_srb:
        mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

        /* Return an error vendor specific response
         * and complete the bsg request
         */
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = 0;
        bsg_reply->result = (DID_OK) << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        /* Always return success, vendor rsp carries correct status */
        return 0;
}
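
/*
 * BSG vendor command QL_VND_FX00_MGMT_CMD: pass an ISPFx00 management
 * IOCB request from the application through to the firmware.
 */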
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = (DID_ERROR << 16);
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
        srb_t *sp;
        int req_sg_cnt = 0, rsp_sg_cnt = 0;
        struct fc_port *fcport;
        char *type = "FC_BSG_HST_FX_MGMT";

        /* Copy the IOCB specific information */
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        /* Dump the vendor information */
        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
            piocb_rqst, sizeof(*piocb_rqst));

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70d0,
                    "Host is not online.\n");
                rval = -EIO;
                goto done;
        }

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
                req_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
                if (!req_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c7,
                            "dma_map_sg return %d for request\n", req_sg_cnt);
                        rval = -ENOMEM;
                        goto done;
                }
        }

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
                rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                if (!rsp_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c8,
                            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
                        rval = -ENOMEM;
                        goto done_unmap_req_sg;
                }
        }

        ql_dbg(ql_dbg_user, vha, 0x70c9,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

        /* Allocate a dummy fcport structure, since the functions that
         * prepare the IOCB and mailbox command retrieve port-specific
         * information from the fcport structure. For host-based ELS
         * commands there is no fcport structure allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
                ql_log(ql_log_warn, vha, 0x70ca,
                    "Failed to allocate fcport.\n");
                rval = -ENOMEM;
                goto done_unmap_rsp_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x70cb,
                    "qla2x00_get_sp failed.\n");
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

        sp->type = SRB_FXIOCB_BCMD;
        sp->name = "bsg_fx_mgmt";
        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x70cc,
            "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
            type, piocb_rqst->func_type, fcport->loop_id);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x70cd,
                    "qla2x00_start_sp failed=%d.\n", rval);
                mempool_free(sp, ha->srb_mempool);
                rval = -EIO;
                goto done_free_fcport;
        }
        return rval;

done_free_fcport:
        qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
        return rval;
}
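
/*
 * BSG vendor command QL_VND_SERDES_OP: read or write a single SerDes
 * register word through the 2x00 mailbox interface.
 */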
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x708c,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg_ex sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x7020,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
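
/*
 * BSG vendor command QL_VND_GET_FLASH_UPDATE_CAPS: report the firmware
 * attribute words as a single 64-bit capability value (ISP27xx/28xx
 * only).
 */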
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
            (uint64_t)ha->fw_attributes_ext[0] << 32 |
            (uint64_t)ha->fw_attributes_h << 16 |
            (uint64_t)ha->fw_attributes;

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
        bsg_reply->reply_payload_rcv_len = sizeof(cap);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint64_t online_fw_attr = 0;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

        online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
            (uint64_t)ha->fw_attributes_ext[0] << 32 |
            (uint64_t)ha->fw_attributes_h << 16 |
            (uint64_t)ha->fw_attributes;

        if (online_fw_attr != cap.capabilities) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        bsg_reply->reply_payload_rcv_len = 0;

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
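
/*
 * BSG vendor command QL_VND_GET_BBCR_DATA: report buffer-to-buffer
 * credit recovery (BBCR) status and state, plus the configured and
 * negotiated credit values decoded from vha->bbcr (ISP27xx/28xx only).
 */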
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_bbcr_data bbcr;
        uint16_t loop_id, topo, sw_cap;
        uint8_t domain, area, al_pa, state;
        int rval;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&bbcr, 0, sizeof(bbcr));

        if (vha->flags.bbcr_enable)
                bbcr.status = QLA_BBCR_STATUS_ENABLED;
        else
                bbcr.status = QLA_BBCR_STATUS_DISABLED;

        if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
                rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
                    &area, &domain, &topo, &sw_cap);
                if (rval != QLA_SUCCESS) {
                        bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
                        bbcr.mbx1 = loop_id;
                        goto done;
                }

                state = (vha->bbcr >> 12) & 0x1;
                if (state) {
                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
                        bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
                } else {
                        bbcr.state = QLA_BBCR_STATE_ONLINE;
                        bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
                }

                bbcr.configured_bbscn = vha->bbcr & 0xf;
        }

done:
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
        bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
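
/*
 * BSG vendor commands QL_VND_GET_PRIV_STATS and QL_VND_GET_PRIV_STATS_EX:
 * fetch ISP link statistics into a DMA-coherent buffer and copy them to
 * the reply payload. The _EX variant carries mailbox options in
 * vendor_cmd[1].
 */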
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct link_statistics *stats = NULL;
        dma_addr_t stats_dma;
        int rval;
        uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
        uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

        if (test_bit(UNLOADING, &vha->dpc_flags))
                return -ENODEV;

        if (unlikely(pci_channel_offline(ha->pdev)))
                return -ENODEV;

        if (qla2x00_reset_active(vha))
                return -EBUSY;

        if (!IS_FWI2_CAPABLE(ha))
                return -EPERM;

        stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
            GFP_KERNEL);
        if (!stats) {
                ql_log(ql_log_warn, vha, 0x70e2,
                    "Failed to allocate memory for stats.\n");
                return -ENOMEM;
        }

        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

        if (rval == QLA_SUCCESS) {
                ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
                    stats, sizeof(*stats));
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
        }

        bsg_reply->reply_payload_rcv_len = sizeof(*stats);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
            stats, stats_dma);

        return 0;
}

static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval;
        struct qla_dport_diag *dd;

        if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
            !IS_QLA28XX(vha->hw))
                return -EPERM;

        dd = kmalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd) {
                ql_log(ql_log_warn, vha, 0x70db,
                    "Failed to allocate memory for dport.\n");
                return -ENOMEM;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

        rval = qla26xx_dport_diagnostics(
            vha, dd->buf, sizeof(dd->buf), dd->options);
        if (rval == QLA_SUCCESS) {
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
        }

        bsg_reply->reply_payload_rcv_len = sizeof(*dd);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(dd);

        return 0;
}

static int
qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval;
        struct qla_dport_diag_v2 *dd;
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
        uint16_t options;

        if (!IS_DPORT_CAPABLE(vha->hw))
                return -EPERM;

        dd = kzalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd)
                return -ENOMEM;

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

        options = dd->options;

        /* Check if a D-Port test is already in progress */
        if (options == QLA_GET_DPORT_RESULT_V2 &&
            vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_DPORT_DIAG_IN_PROCESS;
                goto dportcomplete;
        }

        /* Clear the chip-reset flag when a start/restart request arrives */
        if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
            (options == QLA_START_DPORT_TEST_V2 ||
             options == QLA_RESTART_DPORT_TEST_V2)) {
                vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
        }

        /* A get-result request during chip reset means no test is running */
        if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
            options == QLA_GET_DPORT_RESULT_V2) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
                goto dportcomplete;
        }

        rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);

        if (rval == QLA_SUCCESS) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_OK;
                if (options == QLA_START_DPORT_TEST_V2 ||
                    options == QLA_RESTART_DPORT_TEST_V2) {
                        dd->mbx1 = mcp->mb[0];
                        dd->mbx2 = mcp->mb[1];
                        vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
                } else if (options == QLA_GET_DPORT_RESULT_V2) {
                        dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
                        dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
                }
        } else {
                dd->mbx1 = mcp->mb[0];
                dd->mbx2 = mcp->mb[1];
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_DPORT_DIAG_ERR;
        }

dportcomplete:
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));

        bsg_reply->reply_payload_rcv_len = sizeof(*dd);
        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(dd);

        return 0;
}
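
/*
 * BSG vendor command QL_VND_SS_GET_FLASH_IMAGE_STATUS: report which
 * flash regions (firmware image and, on ISP28xx, the auxiliary regions)
 * are currently active.
 */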
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct qla_hw_data *ha = vha->hw;
        struct qla_active_regions regions = { };
        struct active_regions active_regions = { };

        qla27xx_get_active_image(vha, &active_regions);
        regions.global_image = active_regions.global;

        if (IS_QLA27XX(ha))
                regions.nvme_params = QLA27XX_PRIMARY_IMAGE;

        if (IS_QLA28XX(ha)) {
                qla28xx_get_aux_images(vha, &active_regions);
                regions.board_config = active_regions.aux.board_config;
                regions.vpd_nvram = active_regions.aux.vpd_nvram;
                regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
                regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
                regions.nvme_params = active_regions.aux.nvme_params;
        }

        ql_dbg(ql_dbg_user, vha, 0x70e1,
            "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
            __func__, vha->host_no, regions.global_image,
            regions.board_config, regions.vpd_nvram,
            regions.npiv_config_0_1, regions.npiv_config_2_3,
            regions.nvme_params);

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_reply->reply_payload_rcv_len = sizeof(regions);
        bsg_reply->result = DID_OK << 16;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
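
/*
 * BSG vendor command QL_VND_MANAGE_HOST_STATS: start, stop or clear host
 * statistics collection on behalf of the application.
 */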
static int
qla2x00_manage_host_stats(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_mng_host_stats_param *req_data;
        struct ql_vnd_mng_host_stats_resp rsp_data;
        u32 req_data_len;
        int ret = 0;

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
                return -EIO;
        }

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer in req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data,
            req_data_len);

        switch (req_data->action) {
        case QLA_STOP:
                ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
                break;
        case QLA_START:
                ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
                break;
        case QLA_CLEAR:
                ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
                break;
        default:
                ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
                ret = -EIO;
                break;
        }

        kfree(req_data);

        /* Prepare response */
        rsp_data.status = ret;
        bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt,
                &rsp_data,
                sizeof(struct ql_vnd_mng_host_stats_resp));

        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return ret;
}

static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_stats_param *req_data;
        struct ql_vnd_host_stats_resp rsp_data;
        u32 req_data_len;
        int ret = 0;
        u64 ini_entry_count = 0;
        u64 entry_count = 0;
        u64 tgt_num = 0;
        u64 tmp_stat_type = 0;
        u64 response_len = 0;
        void *data;

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer in req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, req_data_len);

        /* Copy stat type to work on it */
        tmp_stat_type = req_data->stat_type;

        if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
                /* Num of tgts connected to this host */
                tgt_num = qla2x00_get_num_tgts(vha);
                /* unset BIT_17 */
                tmp_stat_type &= ~(1 << 17);
        }

        /* Total ini stats */
        ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

        /* Total number of entries */
        entry_count = ini_entry_count + tgt_num;

        response_len = sizeof(struct ql_vnd_host_stats_resp) +
            (sizeof(struct ql_vnd_stat_entry) * entry_count);

        if (response_len > bsg_job->reply_payload.payload_len) {
                rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

                bsg_reply->reply_payload_rcv_len =
                    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, &rsp_data,
                        sizeof(struct ql_vnd_mng_host_stats_resp));

                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
                goto host_stat_out;
        }

        data = kzalloc(response_len, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto host_stat_out;
        }

        ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job),
            req_data->stat_type, data, response_len);

        rsp_data.status = EXT_STATUS_OK;
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, data, response_len);

        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(data);
host_stat_out:
        kfree(req_data);
        return ret;
}
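
/* Look up the fc_rport attached to this host by target number. */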
static struct fc_rport *
qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
{
        fc_port_t *fcport = NULL;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if (fcport->rport->number == tgt_num)
                        return fcport->rport;
        }
        return NULL;
}

static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_tgt_stats_param *req_data;
        u32 req_data_len;
        int ret = 0;
        u64 response_len = 0;
        struct ql_vnd_tgt_stats_resp *data = NULL;
        struct fc_rport *rport = NULL;

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
                return -EIO;
        }

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer in req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt,
            req_data, req_data_len);

        response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
            sizeof(struct ql_vnd_stat_entry);

        /* structure + size for one entry */
        data = kzalloc(response_len, GFP_KERNEL);
        if (!data) {
                kfree(req_data);
                return -ENOMEM;
        }

        if (response_len > bsg_job->reply_payload.payload_len) {
                data->status = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

                bsg_reply->reply_payload_rcv_len =
                    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, data,
                        sizeof(struct ql_vnd_tgt_stats_resp));

                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
                goto tgt_stat_out;
        }

        rport = qla2xxx_find_rport(vha, req_data->tgt_id);
        if (!rport) {
                ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
                ret = EXT_STATUS_INVALID_PARAM;
                data->status = EXT_STATUS_INVALID_PARAM;
                goto reply;
        }

        ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job),
            req_data->stat_type, rport, (void *)data, response_len);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, data,
                response_len);

        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
        kfree(data);
        kfree(req_data);

        return ret;
}

static int
qla2x00_manage_host_port(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_mng_host_port_param *req_data;
        struct ql_vnd_mng_host_port_resp rsp_data;
        u32 req_data_len;
        int ret = 0;

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer in req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, req_data_len);

        switch (req_data->action) {
        case QLA_ENABLE:
                ret = qla2xxx_enable_port(vha->host);
                break;
        case QLA_DISABLE:
                ret = qla2xxx_disable_port(vha->host);
                break;
        default:
                ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
                ret = -EIO;
                break;
        }

        kfree(req_data);

        /* Prepare response */
        rsp_data.status = ret;
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);

        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, &rsp_data,
                sizeof(struct ql_vnd_mng_host_port_resp));
        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return ret;
}
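
/* Dispatch an FC_BSG_HST_VENDOR request by vendor_cmd[0]. */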
static int
qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;

        ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
            __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

        switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
        case QL_VND_LOOPBACK:
                return qla2x00_process_loopback(bsg_job);
        case QL_VND_A84_RESET:
                return qla84xx_reset(bsg_job);
        case QL_VND_A84_UPDATE_FW:
                return qla84xx_updatefw(bsg_job);
        case QL_VND_A84_MGMT_CMD:
                return qla84xx_mgmt_cmd(bsg_job);
        case QL_VND_IIDMA:
                return qla24xx_iidma(bsg_job);
        case QL_VND_FCP_PRIO_CFG_CMD:
                return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
        case QL_VND_READ_FLASH:
                return qla2x00_read_optrom(bsg_job);
        case QL_VND_UPDATE_FLASH:
                return qla2x00_update_optrom(bsg_job);
        case QL_VND_SET_FRU_VERSION:
                return qla2x00_update_fru_versions(bsg_job);
        case QL_VND_READ_FRU_STATUS:
                return qla2x00_read_fru_status(bsg_job);
        case QL_VND_WRITE_FRU_STATUS:
                return qla2x00_write_fru_status(bsg_job);
        case QL_VND_WRITE_I2C:
                return qla2x00_write_i2c(bsg_job);
        case QL_VND_READ_I2C:
                return qla2x00_read_i2c(bsg_job);
        case QL_VND_DIAG_IO_CMD:
                return qla24xx_process_bidir_cmd(bsg_job);
        case QL_VND_FX00_MGMT_CMD:
                return qlafx00_mgmt_cmd(bsg_job);
        case QL_VND_SERDES_OP:
                return qla26xx_serdes_op(bsg_job);
        case QL_VND_SERDES_OP_EX:
                return qla8044_serdes_op(bsg_job);
        case QL_VND_GET_FLASH_UPDATE_CAPS:
                return qla27xx_get_flash_upd_cap(bsg_job);
        case QL_VND_SET_FLASH_UPDATE_CAPS:
                return qla27xx_set_flash_upd_cap(bsg_job);
        case QL_VND_GET_BBCR_DATA:
                return qla27xx_get_bbcr_data(bsg_job);
        case QL_VND_GET_PRIV_STATS:
        case QL_VND_GET_PRIV_STATS_EX:
                return qla2x00_get_priv_stats(bsg_job);
        case QL_VND_DPORT_DIAGNOSTICS:
                return qla2x00_do_dport_diagnostics(bsg_job);
        case QL_VND_DPORT_DIAGNOSTICS_V2:
                return qla2x00_do_dport_diagnostics_v2(bsg_job);
        case QL_VND_EDIF_MGMT:
                return qla_edif_app_mgmt(bsg_job);
        case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
                return qla2x00_get_flash_image_status(bsg_job);
        case QL_VND_MANAGE_HOST_STATS:
                return qla2x00_manage_host_stats(bsg_job);
        case QL_VND_GET_HOST_STATS:
                return qla2x00_get_host_stats(bsg_job);
        case QL_VND_GET_TGT_STATS:
                return qla2x00_get_tgt_stats(bsg_job);
        case QL_VND_MANAGE_HOST_PORT:
                return qla2x00_manage_host_port(bsg_job);
        case QL_VND_MBX_PASSTHRU:
                return qla2x00_mailbox_passthru(bsg_job);
        default:
                return -ENOSYS;
        }
}
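
/*
 * Entry point for BSG requests handed down by the FC transport: resolve
 * the owning vha, gate on adapter state and route the job by message
 * code.
 */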
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        int ret = -EINVAL;
        struct fc_rport *rport;
        struct Scsi_Host *host;
        scsi_qla_host_t *vha;

        /* In case no data transferred. */
        bsg_reply->reply_payload_rcv_len = 0;

        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                rport = fc_bsg_to_rport(bsg_job);
                if (!rport)
                        return ret;
                host = rport_to_shost(rport);
                vha = shost_priv(host);
        } else {
                host = fc_bsg_to_shost(bsg_job);
                vha = shost_priv(host);
        }

        /* Disable port will bring down the chip, allow enable command */
        if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
            bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
                goto skip_chip_chk;

        if (vha->hw->flags.port_isolated) {
                bsg_reply->result = DID_ERROR;
                /* operation not permitted */
                return -EPERM;
        }

        if (qla2x00_chip_is_down(vha)) {
                ql_dbg(ql_dbg_user, vha, 0x709f,
                    "BSG: ISP abort active/needed -- cmd=%d.\n",
                    bsg_request->msgcode);
                SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                return -EBUSY;
        }

        if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
                SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                return -EIO;
        }

skip_chip_chk:
        ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
            "Entered %s msgcode=0x%x. bsg ptr %px\n",
            __func__, bsg_request->msgcode, bsg_job);

        switch (bsg_request->msgcode) {
        case FC_BSG_RPT_ELS:
        case FC_BSG_HST_ELS_NOLOGIN:
                ret = qla2x00_process_els(bsg_job);
                break;
        case FC_BSG_HST_CT:
                ret = qla2x00_process_ct(bsg_job);
                break;
        case FC_BSG_HST_VENDOR:
                ret = qla2x00_process_vendor_specific(vha, bsg_job);
                break;
        case FC_BSG_HST_ADD_RPORT:
        case FC_BSG_HST_DEL_RPORT:
        case FC_BSG_RPT_CT:
        default:
                ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
                break;
        }

        ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
            "%s done with return %x\n", __func__, ret);

        return ret;
}
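
/*
 * BSG timeout handler: locate the timed-out job on the outstanding
 * command queues and abort it via the firmware, dropping the SRB's
 * initial reference once it has been pulled off the queue.
 */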
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct qla_hw_data *ha = vha->hw;
        srb_t *sp;
        int cnt, que;
        unsigned long flags;
        struct req_que *req;

        ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
            __func__, bsg_job);

        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x9007,
                    "PCI/Register disconnect.\n");
                qla_pci_set_eeh_busy(vha);
        }

        /* find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
                if (!req)
                        continue;

                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp &&
                            (sp->type == SRB_CT_CMD ||
                             sp->type == SRB_ELS_CMD_HST ||
                             sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
                             sp->type == SRB_FXIOCB_BCMD) &&
                            sp->u.bsg_job == bsg_job) {
                                req->outstanding_cmds[cnt] = NULL;
                                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                                if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
                                        ql_log(ql_log_warn, vha, 0x7089,
                                            "mbx abort_command failed.\n");
                                        bsg_reply->result = -EIO;
                                } else {
                                        ql_dbg(ql_dbg_user, vha, 0x708a,
                                            "mbx abort_command success.\n");
                                        bsg_reply->result = 0;
                                }
                                spin_lock_irqsave(&ha->hardware_lock, flags);
                                goto done;
                        }
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
        bsg_reply->result = -ENXIO;
        return 0;

done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
        return 0;
}
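
/*
 * BSG vendor command QL_VND_MBX_PASSTHRU: execute a raw mailbox command
 * supplied by the application and return the mailbox-out registers in
 * the reply payload.
 */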
int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        int ret = -EINVAL;
        int ptsize = sizeof(struct qla_mbx_passthru);
        struct qla_mbx_passthru *req_data = NULL;
        uint32_t req_data_len;

        req_data_len = bsg_job->request_payload.payload_len;
        if (req_data_len != ptsize) {
                ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
                return -EIO;
        }
        req_data = kzalloc(ptsize, GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0xf0a4,
                    "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer in req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, ptsize);

        ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);

        /* Copy the mailbox response into the reply buffer */
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, req_data, ptsize);

        bsg_reply->reply_payload_rcv_len = ptsize;
        if (ret == QLA_SUCCESS)
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        else
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;

        bsg_job->reply_len = sizeof(*bsg_job->reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);

        kfree(req_data);

        return ret;
}