// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
	      ha->mpi_fw_dump_reading))
		return 0;

	mutex_lock(&ha->optrom_mutex);
	if (IS_P3P_TYPE(ha)) {
		if (off < ha->md_template_size) {
			rval = memory_read_from_buffer(buf, count,
			    &off, ha->md_tmplt_hdr, ha->md_template_size);
		} else {
			off -= ha->md_template_size;
			rval = memory_read_from_buffer(buf, count,
			    &off, ha->md_dump, ha->md_dump_size);
		}
	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
		    MCTP_DUMP_SIZE);
	} else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off,
		    ha->mpi_fw_dump,
		    ha->mpi_fw_dump_len);
	} else if (ha->fw_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
		    ha->fw_dump_len);
	} else {
		rval = 0;
	}
	mutex_unlock(&ha->optrom_mutex);
	return rval;
}
static ssize_t
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int reading;

	if (off != 0)
		return (0);

	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
		if (!ha->fw_dump_reading)
			break;

		ql_log(ql_log_info, vha, 0x705d,
		    "Firmware dump cleared on (%ld).\n", vha->host_no);

		if (IS_P3P_TYPE(ha)) {
			qla82xx_md_free(vha);
			qla82xx_md_prep(vha);
		}
		ha->fw_dump_reading = 0;
		ha->fw_dumped = false;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
			ha->fw_dump_reading = 1;

			ql_log(ql_log_info, vha, 0x705e,
			    "Raw firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 2:
		qla2x00_alloc_fw_dump(vha);
		break;
	case 3:
		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else {
			qla2x00_system_error(vha);
		}
		break;
	case 4:
		if (IS_P3P_TYPE(ha)) {
			if (ha->md_tmplt_hdr)
				ql_dbg(ql_dbg_user, vha, 0x705b,
				    "MiniDump supported with this firmware.\n");
			else
				ql_dbg(ql_dbg_user, vha, 0x709d,
				    "MiniDump not supported with this firmware.\n");
		}
		break;
	case 5:
		if (IS_P3P_TYPE(ha))
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case 6:
		if (!ha->mctp_dump_reading)
			break;

		ql_log(ql_log_info, vha, 0x70c1,
		    "MCTP dump cleared on (%ld).\n", vha->host_no);
		ha->mctp_dump_reading = 0;
		ha->mctp_dumped = 0;
		break;
	case 7:
		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
			ha->mctp_dump_reading = 1;
			ql_log(ql_log_info, vha, 0x70c2,
			    "Raw mctp dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 8:
		if (!ha->mpi_fw_dump_reading)
			break;

		ql_log(ql_log_info, vha, 0x70e7,
		    "MPI firmware dump cleared on (%ld).\n", vha->host_no);
		ha->mpi_fw_dump_reading = 0;
		ha->mpi_fw_dumped = 0;
		break;
	case 9:
		if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
			ha->mpi_fw_dump_reading = 1;
			ql_log(ql_log_info, vha, 0x70e8,
			    "Raw MPI firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 10:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			ql_log(ql_log_info, vha, 0x70e9,
			    "Issuing MPI firmware dump on host#%ld.\n",
			    vha->host_no);
			ha->isp_ops->mpi_fw_dump(vha, 0);
		}
		break;
	}
	return count;
}
static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};
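
/*
 * Control values accepted by a write to the "fw_dump" attribute
 * (see qla2x00_sysfs_write_fw_dump() above):
 *
 *	0  - clear a previously captured firmware dump
 *	1  - expose a captured raw firmware dump for reading
 *	2  - (re)allocate the firmware dump buffer
 *	3  - force a system error / take reset ownership (chip dependent)
 *	4  - report whether MiniDump is supported (P3P parts only)
 *	5  - request an ISP abort (P3P parts only)
 *	6/7  - clear / expose the MCTP dump
 *	8/9  - clear / expose the MPI firmware dump
 *	10 - trigger an MPI firmware dump (ISP27xx/28xx only)
 *
 * Illustrative use from user space (the host number and exact sysfs
 * path vary by system; this is an example only, not a guaranteed path):
 *
 *	echo 1 > /sys/class/scsi_host/host0/device/fw_dump
 *	cat /sys/class/scsi_host/host0/device/fw_dump > fw.dump
 *	echo 0 > /sys/class/scsi_host/host0/device/fw_dump
 */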
static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	if (!IS_NOCACHE_VPD_TYPE(ha)) {
		mutex_unlock(&ha->optrom_mutex);
		goto skip;
	}

	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
	mutex_unlock(&ha->optrom_mutex);

skip:
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
	    ha->nvram_size);
}
static ssize_t
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
		return -EINVAL;

	/* Checksum NVRAM. */
	if (IS_FWI2_CAPABLE(ha)) {
		__le32 *iter = (__force __le32 *)buf;
		uint32_t chksum;

		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
			chksum += le32_to_cpu(*iter);
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x705f,
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
	    count);
	mutex_unlock(&ha->optrom_mutex);

	ql_dbg(ql_dbg_user, vha, 0x7060,
	    "Setting ISP_ABORT_NEEDED\n");

	/* NVRAM settings take effect immediately. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);

	return count;
}
static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 512,
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};
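
/*
 * The NVRAM write path above recomputes the image checksum before the
 * data is committed: the last 32-bit word (last byte on pre-FWI2 parts)
 * is set to the two's complement of the sum of the preceding words, so
 * summing the whole image yields zero.  A sketch of the check a reader
 * could apply to a freshly fetched FWI2-style image (illustrative only,
 * not part of the driver):
 *
 *	__le32 *w = (__le32 *)image;
 *	uint32_t sum = 0;
 *	for (i = 0; i < size >> 2; i++)
 *		sum += le32_to_cpu(w[i]);
 *	// sum == 0 for a valid image
 */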
static ssize_t
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	ssize_t rval = 0;

	mutex_lock(&ha->optrom_mutex);

	if (ha->optrom_state != QLA_SREADING)
		goto out;

	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);

out:
	mutex_unlock(&ha->optrom_mutex);

	return rval;
}

static ssize_t
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->optrom_mutex);

	if (ha->optrom_state != QLA_SWRITING) {
		mutex_unlock(&ha->optrom_mutex);
		return -EINVAL;
	}
	if (off > ha->optrom_region_size) {
		mutex_unlock(&ha->optrom_mutex);
		return -ERANGE;
	}
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	memcpy(&ha->optrom_buffer[off], buf, count);
	mutex_unlock(&ha->optrom_mutex);

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};
static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;
	ssize_t rval = count;

	if (off)
		return -EINVAL;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EAGAIN;

	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
		return -EINVAL;
	if (size > ha->optrom_size - start)
		size = ha->optrom_size - start;

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}
	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
		    ha->optrom_state != QLA_SWRITING) {
			rval = -EINVAL;
			goto out;
		}
		ha->optrom_state = QLA_SWAITING;

		ql_dbg(ql_dbg_user, vha, 0x7061,
		    "Freeing flash region allocation -- 0x%x bytes.\n",
		    ha->optrom_region_size);

		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = size;

		ha->optrom_state = QLA_SREADING;
		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			ql_log(ql_log_warn, vha, 0x7062,
			    "Unable to allocate memory for optrom retrieval (%x).\n",
			    ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			rval = -ENOMEM;
			goto out;
		}

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7063,
			    "HBA not online, failing NVRAM update.\n");
			rval = -EAGAIN;
			goto out;
		}

		ql_dbg(ql_dbg_user, vha, 0x7064,
		    "Reading flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	case 2:
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;
		}

		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space. Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 *	0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *	0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 *
		 * > ISP25xx type boards:
		 *
		 *	None -- should go through BSG.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7065,
			    "Invalid start region 0x%x/0x%x.\n", start, size);
			rval = -EINVAL;
			goto out;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = size;

		ha->optrom_state = QLA_SWRITING;
		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			ql_log(ql_log_warn, vha, 0x7066,
			    "Unable to allocate memory for optrom update (%x)\n",
			    ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			rval = -ENOMEM;
			goto out;
		}

		ql_dbg(ql_dbg_user, vha, 0x7067,
		    "Staging flash region write -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	case 3:
		if (ha->optrom_state != QLA_SWRITING) {
			rval = -EINVAL;
			goto out;
		}

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7068,
			    "HBA not online, failing flash update.\n");
			rval = -EAGAIN;
			goto out;
		}

		ql_dbg(ql_dbg_user, vha, 0x7069,
		    "Writing flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		if (rval)
			rval = -EIO;
		break;
	default:
		rval = -EINVAL;
	}

out:
	mutex_unlock(&ha->optrom_mutex);
	return rval;
}
static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};
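
/*
 * The "optrom"/"optrom_ctl" pair implements a simple staging protocol.
 * A write of "<cmd>:<start>:<size>" (start/size in hex) to optrom_ctl
 * drives the state machine in qla2x00_sysfs_write_optrom_ctl() above:
 *
 *	1 - allocate a buffer and read <size> bytes of flash at <start>
 *	    (QLA_SREADING); the data is then fetched via "optrom"
 *	2 - allocate a staging buffer for a flash update (QLA_SWRITING);
 *	    the new image is then written into "optrom"
 *	3 - burn the staged buffer to flash
 *	0 - free the staging buffer and return to QLA_SWAITING
 *
 * Illustrative sequence for reading 64 KB of boot code at offset 0
 * (paths and sizes are examples only):
 *
 *	echo "1:0:10000" > optrom_ctl
 *	dd if=optrom of=boot.bin bs=64k count=1
 *	echo "0" > optrom_ctl
 */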
static ssize_t
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EAGAIN;

	if (!capable(CAP_SYS_ADMIN))
		return -EINVAL;

	if (!IS_NOCACHE_VPD_TYPE(ha))
		goto skip;

	faddr = ha->flt_region_vpd << 2;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec << 2;

		ql_dbg(ql_dbg_init, vha, 0x7070,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
	mutex_unlock(&ha->optrom_mutex);

skip:
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
static ssize_t
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint8_t *tmp_data;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (qla2x00_chip_is_down(vha))
		return 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x706a,
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EINVAL;
	}

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		mutex_unlock(&ha->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x706b,
		    "Unable to allocate memory for VPD information update.\n");
		return -ENOMEM;
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);

	mutex_unlock(&ha->optrom_mutex);

	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};
static ssize_t
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
		return 0;

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	rval = qla2x00_read_sfp_dev(vha, buf, count);
	mutex_unlock(&vha->hw->optrom_mutex);

	if (rval)
		return -EIO;

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE,
	.read = qla2x00_sysfs_read_sfp,
};
static ssize_t
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int type;
	uint32_t idc_control;
	uint8_t *tmp_data = NULL;

	if (off != 0)
		return -EINVAL;

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
		ql_log(ql_log_info, vha, 0x706e,
		    "Issuing ISP reset.\n");

		if (vha->hw->flags.port_isolated) {
			ql_log(ql_log_info, vha, 0x706e,
			    "Port is isolated, returning.\n");
			return -EINVAL;
		}

		scsi_block_requests(vha->host);
		if (IS_QLA82XX(ha)) {
			ha->flags.isp82xx_no_md_cap = 1;
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			idc_control = qla8044_rd_reg(ha,
			    QLA8044_IDC_DRV_CTRL);
			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
			    (idc_control | GRACEFUL_RESET_BIT1));
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			return -EPERM;

		ql_log(ql_log_info, vha, 0x706f,
		    "Issuing MPI reset.\n");

		if (IS_QLA83XX(ha)) {
			uint32_t idc_control;

			qla83xx_idc_lock(vha, 0);
			__qla83xx_get_idc_control(vha, &idc_control);
			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
			__qla83xx_set_idc_control(vha, idc_control);
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_NEED_RESET);
			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
			qla83xx_idc_unlock(vha, 0);
			break;
		} else {
			/* Make sure FC side is not in reset */
			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
				     QLA_SUCCESS);

			/* Issue MPI reset */
			scsi_block_requests(vha->host);
			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0x7070,
				    "MPI reset failed.\n");
			scsi_unblock_requests(vha->host);
			break;
		}
		break;
	case 0x2025e:
		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
			ql_log(ql_log_info, vha, 0x7071,
			    "FCoE ctx reset not supported.\n");
			return -EPERM;
		}

		ql_log(ql_log_info, vha, 0x7072,
		    "Issuing FCoE ctx reset.\n");
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_fcoe_ctx_reset(vha);
		break;
	case 0x2025f:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bc,
		    "Disabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control |= QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20260:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bd,
		    "Enabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20261:
		ql_dbg(ql_dbg_user, vha, 0x70e0,
		    "Updating cache versions without reset ");

		tmp_data = vmalloc(256);
		if (!tmp_data) {
			ql_log(ql_log_warn, vha, 0x70e1,
			    "Unable to allocate memory for VPD information update.\n");
			return -ENOMEM;
		}
		ha->isp_ops->get_flash_version(vha, tmp_data);
		vfree(tmp_data);

		break;
	}
	return count;
}
static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};
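
/*
 * Reset codes accepted by the "reset" attribute (values written in
 * decimal from user space, shown here in hex to match the switch above):
 *
 *	0x2025c - full ISP reset
 *	0x2025d - MPI reset (ISP81xx/83xx/27xx/28xx only)
 *	0x2025e - FCoE context reset (P3P base port only)
 *	0x2025f - disable reset via IDC control (ISP8031)
 *	0x20260 - re-enable reset via IDC control (ISP8031)
 *	0x20261 - refresh cached flash version info without a reset
 *
 * For example, writing "131676" (131676 == 0x2025c) requests an ISP
 * reset; the value is parsed with simple_strtol() in base 10.
 */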
static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
		   struct bin_attribute *bin_attr,
		   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	int type;
	port_id_t did;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return 0;

	if (qla2x00_chip_is_down(vha))
		return 0;

	type = simple_strtol(buf, NULL, 10);

	did.b.domain = (type & 0x00ff0000) >> 16;
	did.b.area = (type & 0x0000ff00) >> 8;
	did.b.al_pa = (type & 0x000000ff);

	ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
	    did.b.domain, did.b.area, did.b.al_pa);

	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);

	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
	return count;
}
static struct bin_attribute sysfs_issue_logo_attr = {
	.attr = {
		.name = "issue_logo",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_issue_logo,
};
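
/*
 * The value written to "issue_logo" is the 24-bit FC port ID (D_ID) of
 * the peer to send an explicit LOGO to, packed as domain/area/AL_PA in
 * bits 23-16/15-8/7-0 and written in decimal.  For example, port ID
 * 01:02:03 corresponds to 0x010203 == 66051 (illustrative value only).
 */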
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x7076,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);

	mutex_unlock(&vha->hw->optrom_mutex);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7077,
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count : actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};
static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	mutex_lock(&vha->hw->optrom_mutex);
	if (ha->dcbx_tlv)
		goto do_read;
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x7078,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return -ENOMEM;
	}

do_read:
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);

	mutex_unlock(&vha->hw->optrom_mutex);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7079,
		    "Unable to read DCBX TLV (%x).\n", rval);
		return -EIO;
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};
static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int type;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
	{ "reset", &sysfs_reset_attr, },
	{ "issue_logo", &sysfs_issue_logo_attr, },
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
	{ NULL },
};
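
/*
 * The "type" field gates attribute creation in
 * qla2x00_alloc_sysfs_attr()/qla2x00_free_sysfs_attr() below: 0 (unset)
 * applies to every ISP, any non-zero value requires an FWI2-capable
 * adapter, type 2 additionally requires ISP25xx, and type 3 requires a
 * CNA-capable adapter (xgmac_stats, dcbx_tlv).
 */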
void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	int ret;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
			continue;
		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
			continue;
		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
			continue;

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			ql_log(ql_log_warn, vha, 0x00f3,
			    "Unable to create sysfs %s binary attribute (%d).\n",
			    iter->name, ret);
		else
			ql_dbg(ql_dbg_init, vha, 0x00f4,
			    "Successfully created sysfs %s binary attribute.\n",
			    iter->name);
	}
}

void
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	struct qla_hw_data *ha = vha->hw;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->type && !IS_FWI2_CAPABLE(ha))
			continue;
		if (iter->type == 2 && !IS_QLA25XX(ha))
			continue;
		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
			continue;

		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
	}

	if (stop_beacon && ha->beacon_blink_led == 1)
		ha->isp_ops->beacon_off(vha);
}
/* Scsi_Host attributes. */

static ssize_t
qla2x00_driver_version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}

static ssize_t
qla2x00_fw_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];

	return scnprintf(buf, PAGE_SIZE, "%s\n",
	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
}

static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t sn;

	if (IS_QLAFX00(vha->hw)) {
		return scnprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.serial_num);
	} else if (IS_FWI2_CAPABLE(ha)) {
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
		return strlen(strcat(buf, "\n"));
	}

	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
	    sn % 100000);
}

static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}

static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return scnprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.hw_version);

	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}

static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
}

static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	char pci_info[30];

	return scnprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->isp_ops->pci_info_str(vha, pci_info,
	    sizeof(pci_info)));
}

static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int len = 0;

	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
	    vha->device_flags & DFLG_NO_CABLE)
		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
		 qla2x00_chip_is_down(vha))
		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
	else {
		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
		switch (ha->current_topology) {
		case ISP_CFG_NL:
			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		case ISP_CFG_FL:
			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
			break;
		case ISP_CFG_N:
			len += scnprintf(buf + len, PAGE_SIZE-len,
			    "N_Port to N_Port\n");
			break;
		case ISP_CFG_F:
			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
			break;
		default:
			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		}
	}
	return len;
}
static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	switch (vha->hw->zio_mode) {
	case QLA_ZIO_MODE_6:
		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
		break;
	case QLA_ZIO_DISABLED:
		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
		break;
	}
	return len;
}

static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	uint16_t zio_mode;

	if (!IS_ZIO_SUPPORTED(ha))
		return -ENOTSUPP;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		zio_mode = QLA_ZIO_MODE_6;
	else
		zio_mode = QLA_ZIO_DISABLED;

	/* Update per-hba values and queue a reset. */
	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = zio_mode;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
	return strlen(buf);
}

static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}

static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;
	uint16_t zio_timer;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val > 25500 || val < 100)
		return -ERANGE;

	zio_timer = (uint16_t)(val / 100);
	vha->hw->zio_timer = zio_timer;

	return strlen(buf);
}
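
/*
 * The ZIO delay timer is kept in 100-microsecond firmware units: the
 * store handler above accepts 100-25500 (microseconds) and divides by
 * 100, and the show handler multiplies back, so e.g. writing "500"
 * programs a zio_timer value of 5 and reads back as "500 us".
 */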
static ssize_t
qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
	    vha->hw->last_zio_threshold);
}

static ssize_t
qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;

	if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
		return -EINVAL;
	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val < 0 || val > 256)
		return -ERANGE;

	atomic_set(&vha->hw->zio_threshold, val);
	return strlen(buf);
}

static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	if (vha->hw->beacon_blink_led)
		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
	else
		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
	return len;
}

static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	int rval;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x707a,
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (val)
		rval = ha->isp_ops->beacon_on(vha);
	else
		rval = ha->isp_ops->beacon_off(vha);

	if (rval != QLA_SUCCESS)
		count = 0;

	mutex_unlock(&vha->hw->optrom_mutex);

	return count;
}
static ssize_t
qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint16_t led[3] = { 0 };

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	if (ql26xx_led_config(vha, 0, led))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
	    led[0], led[1], led[2]);
}

static ssize_t
qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint16_t options = BIT_0;
	uint16_t led[3] = { 0 };
	uint16_t word[4];
	int n;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
	if (n == 4) {
		if (word[0] == 3) {
			options |= BIT_3|BIT_2|BIT_1;
			led[0] = word[1];
			led[1] = word[2];
			led[2] = word[3];
			goto write;
		}
		return -EINVAL;
	}

	if (n == 2) {
		/* check led index */
		if (word[0] == 0) {
			options |= BIT_2;
			led[0] = word[1];
			goto write;
		}
		if (word[0] == 1) {
			options |= BIT_3;
			led[1] = word[1];
			goto write;
		}
		if (word[0] == 2) {
			options |= BIT_1;
			led[2] = word[1];
			goto write;
		}
		return -EINVAL;
	}

	return -EINVAL;

write:
	if (ql26xx_led_config(vha, options, led))
		return -EFAULT;

	return count;
}
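
/*
 * beacon_config input format (hex words, as parsed above): either
 * "3 <led0> <led1> <led2>" to program all three LEDs at once, or
 * "<index> <value>" with index 0-2 to program a single LED.  For
 * example, "0 4" would set only LED 0 to value 0x4 (illustrative
 * value; the LED encodings themselves are defined by the firmware
 * interface).
 */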
static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
	    ha->bios_revision[0]);
}

static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
	    ha->efi_revision[0]);
}

static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
	    ha->fcode_revision[0]);
}

static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
	    ha->fw_revision[3]);
}

static ssize_t
qla2x00_optrom_gold_fw_version_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
	    ha->gold_fw_version[0], ha->gold_fw_version[1],
	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
}

static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n",
	    vha->qla_stats.total_isp_aborts);
}

static ssize_t
qla24xx_84xx_fw_version_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int rval = QLA_SUCCESS;
	uint16_t status[2] = { 0 };
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA84XX(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	if (!ha->cs84xx->op_fw_version) {
		rval = qla84xx_verify_chip(vha, status);

		if (!rval && !status[0])
			return scnprintf(buf, PAGE_SIZE, "%u\n",
			    (uint32_t)ha->cs84xx->op_fw_version);
	}

	return scnprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t
qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->serdes_version[0], ha->serdes_version[1],
	    ha->serdes_version[2]);
}

static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
	    ha->mpi_capabilities);
}

static ssize_t
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
}

static ssize_t
qla2x00_flash_block_size_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}

static ssize_t
qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_CNA_CAPABLE(vha->hw))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
}

static ssize_t
qla2x00_vn_port_mac_address_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_CNA_CAPABLE(vha->hw))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
}

static ssize_t
qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
}

static ssize_t
qla2x00_thermal_temp_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	uint16_t temp = 0;
	int rc;

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
		goto done;
	}

	if (vha->hw->flags.eeh_busy) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
		goto done;
	}

	rc = qla2x00_get_thermal_temp(vha, &temp);
  1385. mutex_unlock(&vha->hw->optrom_mutex);
  1386. if (rc == QLA_SUCCESS)
  1387. return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
  1388. done:
  1389. return scnprintf(buf, PAGE_SIZE, "\n");
  1390. }
  1391. static ssize_t
  1392. qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
  1393. char *buf)
  1394. {
  1395. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1396. int rval = QLA_FUNCTION_FAILED;
  1397. uint16_t state[6];
  1398. uint32_t pstate;
  1399. if (IS_QLAFX00(vha->hw)) {
  1400. pstate = qlafx00_fw_state_show(dev, attr, buf);
  1401. return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
  1402. }
  1403. mutex_lock(&vha->hw->optrom_mutex);
  1404. if (qla2x00_chip_is_down(vha)) {
  1405. mutex_unlock(&vha->hw->optrom_mutex);
  1406. ql_log(ql_log_warn, vha, 0x707c,
  1407. "ISP reset active.\n");
  1408. goto out;
  1409. } else if (vha->hw->flags.eeh_busy) {
  1410. mutex_unlock(&vha->hw->optrom_mutex);
  1411. goto out;
  1412. }
  1413. rval = qla2x00_get_firmware_state(vha, state);
  1414. mutex_unlock(&vha->hw->optrom_mutex);
  1415. out:
  1416. if (rval != QLA_SUCCESS) {
  1417. memset(state, -1, sizeof(state));
  1418. rval = qla2x00_get_firmware_state(vha, state);
  1419. }
  1420. return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
  1421. state[0], state[1], state[2], state[3], state[4], state[5]);
  1422. }
  1423. static ssize_t
  1424. qla2x00_diag_requests_show(struct device *dev,
  1425. struct device_attribute *attr, char *buf)
  1426. {
  1427. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1428. if (!IS_BIDI_CAPABLE(vha->hw))
  1429. return scnprintf(buf, PAGE_SIZE, "\n");
  1430. return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
  1431. }
  1432. static ssize_t
  1433. qla2x00_diag_megabytes_show(struct device *dev,
  1434. struct device_attribute *attr, char *buf)
  1435. {
  1436. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1437. if (!IS_BIDI_CAPABLE(vha->hw))
  1438. return scnprintf(buf, PAGE_SIZE, "\n");
  1439. return scnprintf(buf, PAGE_SIZE, "%llu\n",
  1440. vha->bidi_stats.transfer_bytes >> 20);
  1441. }
  1442. static ssize_t
  1443. qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
  1444. char *buf)
  1445. {
  1446. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1447. struct qla_hw_data *ha = vha->hw;
  1448. uint32_t size;
  1449. if (!ha->fw_dumped)
  1450. size = 0;
  1451. else if (IS_P3P_TYPE(ha))
  1452. size = ha->md_template_size + ha->md_dump_size;
  1453. else
  1454. size = ha->fw_dump_len;
  1455. return scnprintf(buf, PAGE_SIZE, "%d\n", size);
  1456. }
  1457. static ssize_t
  1458. qla2x00_allow_cna_fw_dump_show(struct device *dev,
  1459. struct device_attribute *attr, char *buf)
  1460. {
  1461. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1462. if (!IS_P3P_TYPE(vha->hw))
  1463. return scnprintf(buf, PAGE_SIZE, "\n");
  1464. else
  1465. return scnprintf(buf, PAGE_SIZE, "%s\n",
  1466. vha->hw->allow_cna_fw_dump ? "true" : "false");
  1467. }
  1468. static ssize_t
  1469. qla2x00_allow_cna_fw_dump_store(struct device *dev,
  1470. struct device_attribute *attr, const char *buf, size_t count)
  1471. {
  1472. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1473. int val = 0;
  1474. if (!IS_P3P_TYPE(vha->hw))
  1475. return -EINVAL;
  1476. if (sscanf(buf, "%d", &val) != 1)
  1477. return -EINVAL;
  1478. vha->hw->allow_cna_fw_dump = val != 0;
  1479. return strlen(buf);
  1480. }
  1481. static ssize_t
  1482. qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
  1483. char *buf)
  1484. {
  1485. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1486. struct qla_hw_data *ha = vha->hw;
  1487. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1488. return scnprintf(buf, PAGE_SIZE, "\n");
  1489. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
  1490. ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
  1491. }
  1492. static ssize_t
  1493. qla2x00_min_supported_speed_show(struct device *dev,
  1494. struct device_attribute *attr, char *buf)
  1495. {
  1496. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1497. struct qla_hw_data *ha = vha->hw;
  1498. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1499. return scnprintf(buf, PAGE_SIZE, "\n");
  1500. return scnprintf(buf, PAGE_SIZE, "%s\n",
1501. ha->min_supported_speed == 6 ? "64Gbps" :
1502. ha->min_supported_speed == 5 ? "32Gbps" :
1503. ha->min_supported_speed == 4 ? "16Gbps" :
1504. ha->min_supported_speed == 3 ? "8Gbps" :
1505. ha->min_supported_speed == 2 ? "4Gbps" :
  1506. ha->min_supported_speed != 0 ? "unknown" : "");
  1507. }
  1508. static ssize_t
  1509. qla2x00_max_supported_speed_show(struct device *dev,
  1510. struct device_attribute *attr, char *buf)
  1511. {
  1512. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1513. struct qla_hw_data *ha = vha->hw;
  1514. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1515. return scnprintf(buf, PAGE_SIZE, "\n");
  1516. return scnprintf(buf, PAGE_SIZE, "%s\n",
1517. ha->max_supported_speed == 2 ? "64Gbps" :
1518. ha->max_supported_speed == 1 ? "32Gbps" :
1519. ha->max_supported_speed == 0 ? "16Gbps" : "unknown");
  1520. }
  1521. static ssize_t
  1522. qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
  1523. const char *buf, size_t count)
  1524. {
  1525. struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
  1526. ulong type, speed;
  1527. int oldspeed, rval;
  1528. int mode = QLA_SET_DATA_RATE_LR;
  1529. struct qla_hw_data *ha = vha->hw;
  1530. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
  1531. ql_log(ql_log_warn, vha, 0x70d8,
  1532. "Speed setting not supported \n");
  1533. return -EINVAL;
  1534. }
  1535. rval = kstrtol(buf, 10, &type);
  1536. if (rval)
  1537. return rval;
  1538. speed = type;
  1539. if (type == 40 || type == 80 || type == 160 ||
  1540. type == 320) {
  1541. ql_dbg(ql_dbg_user, vha, 0x70d9,
  1542. "Setting will be affected after a loss of sync\n");
  1543. type = type/10;
  1544. mode = QLA_SET_DATA_RATE_NOLR;
  1545. }
  1546. oldspeed = ha->set_data_rate;
  1547. switch (type) {
  1548. case 0:
  1549. ha->set_data_rate = PORT_SPEED_AUTO;
  1550. break;
  1551. case 4:
  1552. ha->set_data_rate = PORT_SPEED_4GB;
  1553. break;
  1554. case 8:
  1555. ha->set_data_rate = PORT_SPEED_8GB;
  1556. break;
  1557. case 16:
  1558. ha->set_data_rate = PORT_SPEED_16GB;
  1559. break;
  1560. case 32:
  1561. ha->set_data_rate = PORT_SPEED_32GB;
  1562. break;
  1563. default:
  1564. ql_log(ql_log_warn, vha, 0x1199,
  1565. "Unrecognized speed setting:%lx. Setting Autoneg\n",
  1566. speed);
  1567. ha->set_data_rate = PORT_SPEED_AUTO;
  1568. }
  1569. if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
  1570. return -EINVAL;
  1571. ql_log(ql_log_info, vha, 0x70da,
  1572. "Setting speed to %lx Gbps \n", type);
  1573. rval = qla2x00_set_data_rate(vha, mode);
  1574. if (rval != QLA_SUCCESS)
  1575. return -EIO;
  1576. return strlen(buf);
  1577. }
  1578. static const struct {
  1579. u16 rate;
  1580. char *str;
  1581. } port_speed_str[] = {
  1582. { PORT_SPEED_4GB, "4" },
  1583. { PORT_SPEED_8GB, "8" },
  1584. { PORT_SPEED_16GB, "16" },
  1585. { PORT_SPEED_32GB, "32" },
  1586. { PORT_SPEED_64GB, "64" },
  1587. { PORT_SPEED_10GB, "10" },
  1588. };
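/*
* Report the current link rate: query it via qla2x00_get_data_rate() and
* map ha->link_data_rate through port_speed_str[]; rates not in the table
* read back as "Unknown".
*/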
  1589. static ssize_t
  1590. qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
  1591. char *buf)
  1592. {
  1593. struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
  1594. struct qla_hw_data *ha = vha->hw;
  1595. ssize_t rval;
  1596. u16 i;
  1597. char *speed = "Unknown";
  1598. rval = qla2x00_get_data_rate(vha);
  1599. if (rval != QLA_SUCCESS) {
  1600. ql_log(ql_log_warn, vha, 0x70db,
  1601. "Unable to get port speed rval:%zd\n", rval);
  1602. return -EINVAL;
  1603. }
  1604. for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
  1605. if (port_speed_str[i].rate != ha->link_data_rate)
  1606. continue;
  1607. speed = port_speed_str[i].str;
  1608. break;
  1609. }
  1610. return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
  1611. }
  1612. static ssize_t
  1613. qla2x00_mpi_pause_store(struct device *dev,
  1614. struct device_attribute *attr, const char *buf, size_t count)
  1615. {
  1616. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1617. int rval = 0;
  1618. if (sscanf(buf, "%d", &rval) != 1)
  1619. return -EINVAL;
  1620. ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n");
  1621. rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001);
  1622. if (rval != QLA_SUCCESS) {
  1623. ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n");
  1624. count = 0;
  1625. }
  1626. return count;
  1627. }
  1628. static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store);
  1629. /* ----- */
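/*
* The qlini_mode, ql2xexchoffld and ql2xiniexchg attributes below let the
* user switch between enabled, disabled, dual and exclusive initiator
* operation and resize the exchange pools. New exchange counts only take
* effect when qlini_mode is (re)written; an accepted mode change schedules
* an ISP abort to apply it.
*/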
  1630. static ssize_t
  1631. qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
  1632. {
  1633. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1634. int len = 0;
  1635. len += scnprintf(buf + len, PAGE_SIZE-len,
  1636. "Supported options: enabled | disabled | dual | exclusive\n");
  1637. /* --- */
  1638. len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
  1639. switch (vha->qlini_mode) {
  1640. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1641. len += scnprintf(buf + len, PAGE_SIZE-len,
  1642. QLA2XXX_INI_MODE_STR_EXCLUSIVE);
  1643. break;
  1644. case QLA2XXX_INI_MODE_DISABLED:
  1645. len += scnprintf(buf + len, PAGE_SIZE-len,
  1646. QLA2XXX_INI_MODE_STR_DISABLED);
  1647. break;
  1648. case QLA2XXX_INI_MODE_ENABLED:
  1649. len += scnprintf(buf + len, PAGE_SIZE-len,
  1650. QLA2XXX_INI_MODE_STR_ENABLED);
  1651. break;
  1652. case QLA2XXX_INI_MODE_DUAL:
  1653. len += scnprintf(buf + len, PAGE_SIZE-len,
  1654. QLA2XXX_INI_MODE_STR_DUAL);
  1655. break;
  1656. }
  1657. len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
  1658. return len;
  1659. }
  1660. static char *mode_to_str[] = {
  1661. "exclusive",
  1662. "disabled",
  1663. "enabled",
  1664. "dual",
  1665. };
  1666. #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
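/*
* Decide how to handle a requested initiator-mode change: based on the
* current mode, the requested mode, whether target mode is still active
* and whether the exchange-offload settings changed, the request is
* accepted (optionally forcing a mode switch and ISP abort), recorded
* with no further action, rejected while target mode is active, or
* ignored.
*/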
  1667. static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
  1668. {
  1669. enum {
  1670. NO_ACTION,
  1671. MODE_CHANGE_ACCEPT,
  1672. MODE_CHANGE_NO_ACTION,
  1673. TARGET_STILL_ACTIVE,
  1674. };
  1675. int action = NO_ACTION;
  1676. int set_mode = 0;
  1677. u8 eo_toggle = 0; /* exchange offload flipped */
  1678. switch (vha->qlini_mode) {
  1679. case QLA2XXX_INI_MODE_DISABLED:
  1680. switch (op) {
  1681. case QLA2XXX_INI_MODE_DISABLED:
  1682. if (qla_tgt_mode_enabled(vha)) {
  1683. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1684. vha->hw->flags.exchoffld_enabled)
  1685. eo_toggle = 1;
  1686. if (((vha->ql2xexchoffld !=
  1687. vha->u_ql2xexchoffld) &&
  1688. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1689. eo_toggle) {
  1690. /*
1691. * The number of exchanges to be offloaded
1692. * was tweaked or the offload option was
1693. * flipped.
  1694. */
  1695. action = MODE_CHANGE_ACCEPT;
  1696. } else {
  1697. action = MODE_CHANGE_NO_ACTION;
  1698. }
  1699. } else {
  1700. action = MODE_CHANGE_NO_ACTION;
  1701. }
  1702. break;
  1703. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1704. if (qla_tgt_mode_enabled(vha)) {
  1705. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1706. vha->hw->flags.exchoffld_enabled)
  1707. eo_toggle = 1;
  1708. if (((vha->ql2xexchoffld !=
  1709. vha->u_ql2xexchoffld) &&
  1710. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1711. eo_toggle) {
  1712. /*
1713. * The number of exchanges to be offloaded
1714. * was tweaked or the offload option was
1715. * flipped.
  1716. */
  1717. action = MODE_CHANGE_ACCEPT;
  1718. } else {
  1719. action = MODE_CHANGE_NO_ACTION;
  1720. }
  1721. } else {
  1722. action = MODE_CHANGE_ACCEPT;
  1723. }
  1724. break;
  1725. case QLA2XXX_INI_MODE_DUAL:
  1726. action = MODE_CHANGE_ACCEPT;
  1727. /* active_mode is target only, reset it to dual */
  1728. if (qla_tgt_mode_enabled(vha)) {
  1729. set_mode = 1;
  1730. action = MODE_CHANGE_ACCEPT;
  1731. } else {
  1732. action = MODE_CHANGE_NO_ACTION;
  1733. }
  1734. break;
  1735. case QLA2XXX_INI_MODE_ENABLED:
  1736. if (qla_tgt_mode_enabled(vha))
  1737. action = TARGET_STILL_ACTIVE;
  1738. else {
  1739. action = MODE_CHANGE_ACCEPT;
  1740. set_mode = 1;
  1741. }
  1742. break;
  1743. }
  1744. break;
  1745. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1746. switch (op) {
  1747. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1748. if (qla_tgt_mode_enabled(vha)) {
  1749. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1750. vha->hw->flags.exchoffld_enabled)
  1751. eo_toggle = 1;
  1752. if (((vha->ql2xexchoffld !=
  1753. vha->u_ql2xexchoffld) &&
  1754. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1755. eo_toggle)
  1756. /*
1757. * The number of exchanges to be offloaded
1758. * was tweaked or the offload option was
1759. * flipped.
  1760. */
  1761. action = MODE_CHANGE_ACCEPT;
  1762. else
  1763. action = NO_ACTION;
  1764. } else
  1765. action = NO_ACTION;
  1766. break;
  1767. case QLA2XXX_INI_MODE_DISABLED:
  1768. if (qla_tgt_mode_enabled(vha)) {
  1769. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1770. vha->hw->flags.exchoffld_enabled)
  1771. eo_toggle = 1;
  1772. if (((vha->ql2xexchoffld !=
  1773. vha->u_ql2xexchoffld) &&
  1774. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1775. eo_toggle)
  1776. action = MODE_CHANGE_ACCEPT;
  1777. else
  1778. action = MODE_CHANGE_NO_ACTION;
  1779. } else
  1780. action = MODE_CHANGE_NO_ACTION;
  1781. break;
  1782. case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
  1783. if (qla_tgt_mode_enabled(vha)) {
  1784. action = MODE_CHANGE_ACCEPT;
  1785. set_mode = 1;
  1786. } else
  1787. action = MODE_CHANGE_ACCEPT;
  1788. break;
  1789. case QLA2XXX_INI_MODE_ENABLED:
  1790. if (qla_tgt_mode_enabled(vha))
  1791. action = TARGET_STILL_ACTIVE;
  1792. else {
  1793. if (vha->hw->flags.fw_started)
  1794. action = MODE_CHANGE_NO_ACTION;
  1795. else
  1796. action = MODE_CHANGE_ACCEPT;
  1797. }
  1798. break;
  1799. }
  1800. break;
  1801. case QLA2XXX_INI_MODE_ENABLED:
  1802. switch (op) {
  1803. case QLA2XXX_INI_MODE_ENABLED:
  1804. if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
  1805. vha->hw->flags.exchoffld_enabled)
  1806. eo_toggle = 1;
  1807. if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
  1808. NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
  1809. eo_toggle)
  1810. action = MODE_CHANGE_ACCEPT;
  1811. else
  1812. action = NO_ACTION;
  1813. break;
  1814. case QLA2XXX_INI_MODE_DUAL:
  1815. case QLA2XXX_INI_MODE_DISABLED:
  1816. action = MODE_CHANGE_ACCEPT;
  1817. break;
  1818. default:
  1819. action = MODE_CHANGE_NO_ACTION;
  1820. break;
  1821. }
  1822. break;
  1823. case QLA2XXX_INI_MODE_DUAL:
  1824. switch (op) {
  1825. case QLA2XXX_INI_MODE_DUAL:
  1826. if (qla_tgt_mode_enabled(vha) ||
  1827. qla_dual_mode_enabled(vha)) {
  1828. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
  1829. vha->u_ql2xiniexchg) !=
  1830. vha->hw->flags.exchoffld_enabled)
  1831. eo_toggle = 1;
  1832. if ((((vha->ql2xexchoffld +
  1833. vha->ql2xiniexchg) !=
  1834. (vha->u_ql2xiniexchg +
  1835. vha->u_ql2xexchoffld)) &&
  1836. NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
  1837. vha->u_ql2xexchoffld)) || eo_toggle)
  1838. action = MODE_CHANGE_ACCEPT;
  1839. else
  1840. action = NO_ACTION;
  1841. } else {
  1842. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
  1843. vha->u_ql2xiniexchg) !=
  1844. vha->hw->flags.exchoffld_enabled)
  1845. eo_toggle = 1;
  1846. if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
  1847. != (vha->u_ql2xiniexchg +
  1848. vha->u_ql2xexchoffld)) &&
  1849. NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
  1850. vha->u_ql2xexchoffld)) || eo_toggle)
  1851. action = MODE_CHANGE_NO_ACTION;
  1852. else
  1853. action = NO_ACTION;
  1854. }
  1855. break;
  1856. case QLA2XXX_INI_MODE_DISABLED:
  1857. if (qla_tgt_mode_enabled(vha) ||
  1858. qla_dual_mode_enabled(vha)) {
  1859. /* turning off initiator mode */
  1860. set_mode = 1;
  1861. action = MODE_CHANGE_ACCEPT;
  1862. } else {
  1863. action = MODE_CHANGE_NO_ACTION;
  1864. }
  1865. break;
  1866. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1867. if (qla_tgt_mode_enabled(vha) ||
  1868. qla_dual_mode_enabled(vha)) {
  1869. set_mode = 1;
  1870. action = MODE_CHANGE_ACCEPT;
  1871. } else {
  1872. action = MODE_CHANGE_ACCEPT;
  1873. }
  1874. break;
  1875. case QLA2XXX_INI_MODE_ENABLED:
  1876. if (qla_tgt_mode_enabled(vha) ||
  1877. qla_dual_mode_enabled(vha)) {
  1878. action = TARGET_STILL_ACTIVE;
  1879. } else {
  1880. action = MODE_CHANGE_ACCEPT;
  1881. }
  1882. }
  1883. break;
  1884. }
  1885. switch (action) {
  1886. case MODE_CHANGE_ACCEPT:
  1887. ql_log(ql_log_warn, vha, 0xffff,
  1888. "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
  1889. mode_to_str[vha->qlini_mode], mode_to_str[op],
  1890. vha->ql2xexchoffld, vha->u_ql2xexchoffld,
  1891. vha->ql2xiniexchg, vha->u_ql2xiniexchg);
  1892. vha->qlini_mode = op;
  1893. vha->ql2xexchoffld = vha->u_ql2xexchoffld;
  1894. vha->ql2xiniexchg = vha->u_ql2xiniexchg;
  1895. if (set_mode)
  1896. qlt_set_mode(vha);
  1897. vha->flags.online = 1;
  1898. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1899. break;
  1900. case MODE_CHANGE_NO_ACTION:
  1901. ql_log(ql_log_warn, vha, 0xffff,
  1902. "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
  1903. mode_to_str[vha->qlini_mode], mode_to_str[op],
  1904. vha->ql2xexchoffld, vha->u_ql2xexchoffld,
  1905. vha->ql2xiniexchg, vha->u_ql2xiniexchg);
  1906. vha->qlini_mode = op;
  1907. vha->ql2xexchoffld = vha->u_ql2xexchoffld;
  1908. vha->ql2xiniexchg = vha->u_ql2xiniexchg;
  1909. break;
  1910. case TARGET_STILL_ACTIVE:
  1911. ql_log(ql_log_warn, vha, 0xffff,
  1912. "Target Mode is active. Unable to change Mode.\n");
  1913. break;
  1914. case NO_ACTION:
  1915. default:
  1916. ql_log(ql_log_warn, vha, 0xffff,
  1917. "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
  1918. vha->qlini_mode, op,
  1919. vha->ql2xexchoffld, vha->u_ql2xexchoffld);
  1920. break;
  1921. }
  1922. }
  1923. static ssize_t
  1924. qlini_mode_store(struct device *dev, struct device_attribute *attr,
  1925. const char *buf, size_t count)
  1926. {
  1927. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1928. int ini;
  1929. if (!buf)
  1930. return -EINVAL;
  1931. if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
  1932. strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
  1933. ini = QLA2XXX_INI_MODE_EXCLUSIVE;
  1934. else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
  1935. strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
  1936. ini = QLA2XXX_INI_MODE_DISABLED;
  1937. else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
  1938. strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
  1939. ini = QLA2XXX_INI_MODE_ENABLED;
  1940. else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
  1941. strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
  1942. ini = QLA2XXX_INI_MODE_DUAL;
  1943. else
  1944. return -EINVAL;
  1945. qla_set_ini_mode(vha, ini);
  1946. return strlen(buf);
  1947. }
  1948. static ssize_t
  1949. ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
  1950. char *buf)
  1951. {
  1952. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1953. int len = 0;
  1954. len += scnprintf(buf + len, PAGE_SIZE-len,
  1955. "target exchange: new %d : current: %d\n\n",
  1956. vha->u_ql2xexchoffld, vha->ql2xexchoffld);
  1957. len += scnprintf(buf + len, PAGE_SIZE-len,
  1958. "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
  1959. vha->host_no);
  1960. return len;
  1961. }
  1962. static ssize_t
  1963. ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
  1964. const char *buf, size_t count)
  1965. {
  1966. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1967. int val = 0;
  1968. if (sscanf(buf, "%d", &val) != 1)
  1969. return -EINVAL;
  1970. if (val > FW_MAX_EXCHANGES_CNT)
  1971. val = FW_MAX_EXCHANGES_CNT;
  1972. else if (val < 0)
  1973. val = 0;
  1974. vha->u_ql2xexchoffld = val;
  1975. return strlen(buf);
  1976. }
  1977. static ssize_t
  1978. ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
  1979. char *buf)
  1980. {
  1981. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1982. int len = 0;
  1983. len += scnprintf(buf + len, PAGE_SIZE-len,
  1984. "target exchange: new %d : current: %d\n\n",
  1985. vha->u_ql2xiniexchg, vha->ql2xiniexchg);
  1986. len += scnprintf(buf + len, PAGE_SIZE-len,
  1987. "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
  1988. vha->host_no);
  1989. return len;
  1990. }
  1991. static ssize_t
  1992. ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
  1993. const char *buf, size_t count)
  1994. {
  1995. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1996. int val = 0;
  1997. if (sscanf(buf, "%d", &val) != 1)
  1998. return -EINVAL;
  1999. if (val > FW_MAX_EXCHANGES_CNT)
  2000. val = FW_MAX_EXCHANGES_CNT;
  2001. else if (val < 0)
  2002. val = 0;
  2003. vha->u_ql2xiniexchg = val;
  2004. return strlen(buf);
  2005. }
  2006. static ssize_t
  2007. qla2x00_dif_bundle_statistics_show(struct device *dev,
  2008. struct device_attribute *attr, char *buf)
  2009. {
  2010. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2011. struct qla_hw_data *ha = vha->hw;
  2012. return scnprintf(buf, PAGE_SIZE,
  2013. "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
  2014. ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
  2015. ha->dif_bundle_writes, ha->dif_bundle_kallocs,
  2016. ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
  2017. }
  2018. static ssize_t
  2019. qla2x00_fw_attr_show(struct device *dev,
  2020. struct device_attribute *attr, char *buf)
  2021. {
  2022. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2023. struct qla_hw_data *ha = vha->hw;
  2024. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  2025. return scnprintf(buf, PAGE_SIZE, "\n");
  2026. return scnprintf(buf, PAGE_SIZE, "%llx\n",
  2027. (uint64_t)ha->fw_attributes_ext[1] << 48 |
  2028. (uint64_t)ha->fw_attributes_ext[0] << 32 |
  2029. (uint64_t)ha->fw_attributes_h << 16 |
  2030. (uint64_t)ha->fw_attributes);
  2031. }
  2032. static ssize_t
  2033. qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
  2034. char *buf)
  2035. {
  2036. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2037. return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
  2038. }
  2039. static ssize_t
  2040. qla2x00_dport_diagnostics_show(struct device *dev,
  2041. struct device_attribute *attr, char *buf)
  2042. {
  2043. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2044. if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
  2045. !IS_QLA28XX(vha->hw))
  2046. return scnprintf(buf, PAGE_SIZE, "\n");
  2047. if (!*vha->dport_data)
  2048. return scnprintf(buf, PAGE_SIZE, "\n");
  2049. return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
  2050. vha->dport_data[0], vha->dport_data[1],
  2051. vha->dport_data[2], vha->dport_data[3]);
  2052. }
  2053. static DEVICE_ATTR(dport_diagnostics, 0444,
  2054. qla2x00_dport_diagnostics_show, NULL);
  2055. static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
  2056. static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
  2057. static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
  2058. static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
  2059. static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
  2060. static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
  2061. static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
  2062. static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
  2063. static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
  2064. static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
  2065. static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
  2066. qla2x00_zio_timer_store);
  2067. static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
  2068. qla2x00_beacon_store);
  2069. static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
  2070. qla2x00_beacon_config_store);
  2071. static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
  2072. qla2x00_optrom_bios_version_show, NULL);
  2073. static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
  2074. qla2x00_optrom_efi_version_show, NULL);
  2075. static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
  2076. qla2x00_optrom_fcode_version_show, NULL);
  2077. static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
  2078. NULL);
  2079. static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
  2080. qla2x00_optrom_gold_fw_version_show, NULL);
  2081. static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
  2082. NULL);
  2083. static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
  2084. NULL);
  2085. static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
  2086. static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
  2087. static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
  2088. static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
  2089. NULL);
  2090. static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
  2091. static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
  2092. qla2x00_vn_port_mac_address_show, NULL);
  2093. static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
  2094. static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
  2095. static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
  2096. static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
  2097. static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
  2098. static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
  2099. static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
  2100. qla2x00_allow_cna_fw_dump_show,
  2101. qla2x00_allow_cna_fw_dump_store);
  2102. static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
  2103. static DEVICE_ATTR(min_supported_speed, 0444,
  2104. qla2x00_min_supported_speed_show, NULL);
  2105. static DEVICE_ATTR(max_supported_speed, 0444,
  2106. qla2x00_max_supported_speed_show, NULL);
  2107. static DEVICE_ATTR(zio_threshold, 0644,
  2108. qla_zio_threshold_show,
  2109. qla_zio_threshold_store);
  2110. static DEVICE_ATTR_RW(qlini_mode);
  2111. static DEVICE_ATTR_RW(ql2xexchoffld);
  2112. static DEVICE_ATTR_RW(ql2xiniexchg);
  2113. static DEVICE_ATTR(dif_bundle_statistics, 0444,
  2114. qla2x00_dif_bundle_statistics_show, NULL);
  2115. static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
  2116. qla2x00_port_speed_store);
  2117. static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
  2118. static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
  2119. static struct attribute *qla2x00_host_attrs[] = {
  2120. &dev_attr_driver_version.attr,
  2121. &dev_attr_fw_version.attr,
  2122. &dev_attr_serial_num.attr,
  2123. &dev_attr_isp_name.attr,
  2124. &dev_attr_isp_id.attr,
  2125. &dev_attr_model_name.attr,
  2126. &dev_attr_model_desc.attr,
  2127. &dev_attr_pci_info.attr,
  2128. &dev_attr_link_state.attr,
  2129. &dev_attr_zio.attr,
  2130. &dev_attr_zio_timer.attr,
  2131. &dev_attr_beacon.attr,
  2132. &dev_attr_beacon_config.attr,
  2133. &dev_attr_optrom_bios_version.attr,
  2134. &dev_attr_optrom_efi_version.attr,
  2135. &dev_attr_optrom_fcode_version.attr,
  2136. &dev_attr_optrom_fw_version.attr,
  2137. &dev_attr_84xx_fw_version.attr,
  2138. &dev_attr_total_isp_aborts.attr,
  2139. &dev_attr_serdes_version.attr,
  2140. &dev_attr_mpi_version.attr,
  2141. &dev_attr_phy_version.attr,
  2142. &dev_attr_flash_block_size.attr,
  2143. &dev_attr_vlan_id.attr,
  2144. &dev_attr_vn_port_mac_address.attr,
  2145. &dev_attr_fabric_param.attr,
  2146. &dev_attr_fw_state.attr,
  2147. &dev_attr_optrom_gold_fw_version.attr,
  2148. &dev_attr_thermal_temp.attr,
  2149. &dev_attr_diag_requests.attr,
  2150. &dev_attr_diag_megabytes.attr,
  2151. &dev_attr_fw_dump_size.attr,
  2152. &dev_attr_allow_cna_fw_dump.attr,
  2153. &dev_attr_pep_version.attr,
  2154. &dev_attr_min_supported_speed.attr,
  2155. &dev_attr_max_supported_speed.attr,
  2156. &dev_attr_zio_threshold.attr,
  2157. &dev_attr_dif_bundle_statistics.attr,
  2158. &dev_attr_port_speed.attr,
  2159. &dev_attr_port_no.attr,
  2160. &dev_attr_fw_attr.attr,
  2161. &dev_attr_dport_diagnostics.attr,
  2162. &dev_attr_mpi_pause.attr,
  2163. &dev_attr_qlini_mode.attr,
  2164. &dev_attr_ql2xiniexchg.attr,
  2165. &dev_attr_ql2xexchoffld.attr,
  2166. NULL,
  2167. };
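/*
* Hide the dual-mode tunables (qlini_mode, ql2xiniexchg, ql2xexchoffld)
* unless the driver was loaded with ql2x_ini_mode set to dual.
*/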
  2168. static umode_t qla_host_attr_is_visible(struct kobject *kobj,
  2169. struct attribute *attr, int i)
  2170. {
  2171. if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL &&
  2172. (attr == &dev_attr_qlini_mode.attr ||
  2173. attr == &dev_attr_ql2xiniexchg.attr ||
  2174. attr == &dev_attr_ql2xexchoffld.attr))
  2175. return 0;
  2176. return attr->mode;
  2177. }
  2178. static const struct attribute_group qla2x00_host_attr_group = {
  2179. .is_visible = qla_host_attr_is_visible,
  2180. .attrs = qla2x00_host_attrs
  2181. };
  2182. const struct attribute_group *qla2x00_host_groups[] = {
  2183. &qla2x00_host_attr_group,
  2184. NULL
  2185. };
  2186. /* Host attributes. */
  2187. static void
  2188. qla2x00_get_host_port_id(struct Scsi_Host *shost)
  2189. {
  2190. scsi_qla_host_t *vha = shost_priv(shost);
  2191. fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
  2192. vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
  2193. }
  2194. static void
  2195. qla2x00_get_host_speed(struct Scsi_Host *shost)
  2196. {
  2197. scsi_qla_host_t *vha = shost_priv(shost);
  2198. u32 speed;
  2199. if (IS_QLAFX00(vha->hw)) {
  2200. qlafx00_get_host_speed(shost);
  2201. return;
  2202. }
  2203. switch (vha->hw->link_data_rate) {
  2204. case PORT_SPEED_1GB:
  2205. speed = FC_PORTSPEED_1GBIT;
  2206. break;
  2207. case PORT_SPEED_2GB:
  2208. speed = FC_PORTSPEED_2GBIT;
  2209. break;
  2210. case PORT_SPEED_4GB:
  2211. speed = FC_PORTSPEED_4GBIT;
  2212. break;
  2213. case PORT_SPEED_8GB:
  2214. speed = FC_PORTSPEED_8GBIT;
  2215. break;
  2216. case PORT_SPEED_10GB:
  2217. speed = FC_PORTSPEED_10GBIT;
  2218. break;
  2219. case PORT_SPEED_16GB:
  2220. speed = FC_PORTSPEED_16GBIT;
  2221. break;
  2222. case PORT_SPEED_32GB:
  2223. speed = FC_PORTSPEED_32GBIT;
  2224. break;
  2225. case PORT_SPEED_64GB:
  2226. speed = FC_PORTSPEED_64GBIT;
  2227. break;
  2228. default:
  2229. speed = FC_PORTSPEED_UNKNOWN;
  2230. break;
  2231. }
  2232. fc_host_speed(shost) = speed;
  2233. }
  2234. static void
  2235. qla2x00_get_host_port_type(struct Scsi_Host *shost)
  2236. {
  2237. scsi_qla_host_t *vha = shost_priv(shost);
  2238. uint32_t port_type;
  2239. if (vha->vp_idx) {
  2240. fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
  2241. return;
  2242. }
  2243. switch (vha->hw->current_topology) {
  2244. case ISP_CFG_NL:
  2245. port_type = FC_PORTTYPE_LPORT;
  2246. break;
  2247. case ISP_CFG_FL:
  2248. port_type = FC_PORTTYPE_NLPORT;
  2249. break;
  2250. case ISP_CFG_N:
  2251. port_type = FC_PORTTYPE_PTP;
  2252. break;
  2253. case ISP_CFG_F:
  2254. port_type = FC_PORTTYPE_NPORT;
  2255. break;
  2256. default:
  2257. port_type = FC_PORTTYPE_UNKNOWN;
  2258. break;
  2259. }
  2260. fc_host_port_type(shost) = port_type;
  2261. }
  2262. static void
  2263. qla2x00_get_starget_node_name(struct scsi_target *starget)
  2264. {
  2265. struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
  2266. scsi_qla_host_t *vha = shost_priv(host);
  2267. fc_port_t *fcport;
  2268. u64 node_name = 0;
  2269. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2270. if (fcport->rport &&
  2271. starget->id == fcport->rport->scsi_target_id) {
  2272. node_name = wwn_to_u64(fcport->node_name);
  2273. break;
  2274. }
  2275. }
  2276. fc_starget_node_name(starget) = node_name;
  2277. }
  2278. static void
  2279. qla2x00_get_starget_port_name(struct scsi_target *starget)
  2280. {
  2281. struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
  2282. scsi_qla_host_t *vha = shost_priv(host);
  2283. fc_port_t *fcport;
  2284. u64 port_name = 0;
  2285. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2286. if (fcport->rport &&
  2287. starget->id == fcport->rport->scsi_target_id) {
  2288. port_name = wwn_to_u64(fcport->port_name);
  2289. break;
  2290. }
  2291. }
  2292. fc_starget_port_name(starget) = port_name;
  2293. }
  2294. static void
  2295. qla2x00_get_starget_port_id(struct scsi_target *starget)
  2296. {
  2297. struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
  2298. scsi_qla_host_t *vha = shost_priv(host);
  2299. fc_port_t *fcport;
  2300. uint32_t port_id = ~0U;
  2301. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2302. if (fcport->rport &&
  2303. starget->id == fcport->rport->scsi_target_id) {
  2304. port_id = fcport->d_id.b.domain << 16 |
  2305. fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
  2306. break;
  2307. }
  2308. }
  2309. fc_starget_port_id(starget) = port_id;
  2310. }
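/*
* Propagate the FC transport dev_loss_tmo to the paired NVMe-FC remote
* port (when CONFIG_NVME_FC is enabled) so both protocols age out the
* device on the same schedule; a zero timeout is stored as 1 second.
*/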
  2311. static inline void
  2312. qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
  2313. {
  2314. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  2315. rport->dev_loss_tmo = timeout ? timeout : 1;
  2316. if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
  2317. nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
  2318. rport->dev_loss_tmo);
  2319. }
  2320. static void
  2321. qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
  2322. {
  2323. struct Scsi_Host *host = rport_to_shost(rport);
  2324. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  2325. unsigned long flags;
  2326. if (!fcport)
  2327. return;
  2328. ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
  2329. DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
  2330. rport->port_state));
  2331. /*
  2332. * Now that the rport has been deleted, set the fcport state to
  2333. * FCS_DEVICE_DEAD, if the fcport is still lost.
  2334. */
  2335. if (fcport->scan_state != QLA_FCPORT_FOUND)
  2336. qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
  2337. /*
  2338. * Transport has effectively 'deleted' the rport, clear
  2339. * all local references.
  2340. */
  2341. spin_lock_irqsave(host->host_lock, flags);
  2342. /* Confirm port has not reappeared before clearing pointers. */
  2343. if (rport->port_state != FC_PORTSTATE_ONLINE) {
  2344. fcport->rport = fcport->drport = NULL;
  2345. *((fc_port_t **)rport->dd_data) = NULL;
  2346. }
  2347. spin_unlock_irqrestore(host->host_lock, flags);
  2348. if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
  2349. return;
  2350. if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
  2351. qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
  2352. return;
  2353. }
  2354. }
  2355. static void
  2356. qla2x00_terminate_rport_io(struct fc_rport *rport)
  2357. {
  2358. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  2359. scsi_qla_host_t *vha;
  2360. if (!fcport)
  2361. return;
  2362. if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
  2363. return;
  2364. if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
  2365. return;
  2366. vha = fcport->vha;
  2367. if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
  2368. qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
  2369. qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
  2370. 0, WAIT_TARGET);
  2371. return;
  2372. }
  2373. /*
  2374. * At this point all fcport's software-states are cleared. Perform any
  2375. * final cleanup of firmware resources (PCBs and XCBs).
  2376. *
  2377. * Attempt to cleanup only lost devices.
  2378. */
  2379. if (fcport->loop_id != FC_NO_LOOP_ID) {
  2380. if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
  2381. fcport->scan_state != QLA_FCPORT_FOUND) {
  2382. if (fcport->loop_id != FC_NO_LOOP_ID)
  2383. fcport->logout_on_delete = 1;
  2384. if (!EDIF_NEGOTIATION_PENDING(fcport)) {
  2385. ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
  2386. "%s %d schedule session deletion\n", __func__,
  2387. __LINE__);
  2388. qlt_schedule_sess_for_deletion(fcport);
  2389. }
  2390. } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
  2391. qla2x00_port_logout(fcport->vha, fcport);
  2392. }
  2393. }
  2394. /* check for any straggling io left behind */
  2395. if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
  2396. ql_log(ql_log_warn, vha, 0x300b,
  2397. "IO not return. Resetting. \n");
  2398. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2399. qla2xxx_wake_dpc(vha);
  2400. qla2x00_wait_for_chip_reset(vha);
  2401. }
  2402. }
  2403. static int
  2404. qla2x00_issue_lip(struct Scsi_Host *shost)
  2405. {
  2406. scsi_qla_host_t *vha = shost_priv(shost);
  2407. if (IS_QLAFX00(vha->hw))
  2408. return 0;
  2409. if (vha->hw->flags.port_isolated)
  2410. return 0;
  2411. qla2x00_loop_reset(vha);
  2412. return 0;
  2413. }
  2414. static struct fc_host_statistics *
  2415. qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
  2416. {
  2417. scsi_qla_host_t *vha = shost_priv(shost);
  2418. struct qla_hw_data *ha = vha->hw;
  2419. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  2420. int rval;
  2421. struct link_statistics *stats;
  2422. dma_addr_t stats_dma;
  2423. struct fc_host_statistics *p = &vha->fc_host_stat;
  2424. struct qla_qpair *qpair;
  2425. int i;
  2426. u64 ib = 0, ob = 0, ir = 0, or = 0;
  2427. memset(p, -1, sizeof(*p));
  2428. if (IS_QLAFX00(vha->hw))
  2429. goto done;
  2430. if (test_bit(UNLOADING, &vha->dpc_flags))
  2431. goto done;
  2432. if (unlikely(pci_channel_offline(ha->pdev)))
  2433. goto done;
  2434. if (qla2x00_chip_is_down(vha))
  2435. goto done;
  2436. stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
  2437. GFP_KERNEL);
  2438. if (!stats) {
  2439. ql_log(ql_log_warn, vha, 0x707d,
  2440. "Failed to allocate memory for stats.\n");
  2441. goto done;
  2442. }
  2443. rval = QLA_FUNCTION_FAILED;
  2444. if (IS_FWI2_CAPABLE(ha)) {
  2445. rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
  2446. } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
  2447. !ha->dpc_active) {
  2448. /* Must be in a 'READY' state for statistics retrieval. */
  2449. rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
  2450. stats, stats_dma);
  2451. }
  2452. if (rval != QLA_SUCCESS)
  2453. goto done_free;
  2454. /* --- */
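/*
* Sum the I/O request and byte counters across every queue pair, the
* base queue pair and the legacy per-host counters before folding them
* into the fc_host_statistics fields below.
*/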
  2455. for (i = 0; i < vha->hw->max_qpairs; i++) {
  2456. qpair = vha->hw->queue_pair_map[i];
  2457. if (!qpair)
  2458. continue;
  2459. ir += qpair->counters.input_requests;
  2460. or += qpair->counters.output_requests;
  2461. ib += qpair->counters.input_bytes;
  2462. ob += qpair->counters.output_bytes;
  2463. }
  2464. ir += ha->base_qpair->counters.input_requests;
  2465. or += ha->base_qpair->counters.output_requests;
  2466. ib += ha->base_qpair->counters.input_bytes;
  2467. ob += ha->base_qpair->counters.output_bytes;
  2468. ir += vha->qla_stats.input_requests;
  2469. or += vha->qla_stats.output_requests;
  2470. ib += vha->qla_stats.input_bytes;
  2471. ob += vha->qla_stats.output_bytes;
  2472. /* --- */
  2473. p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
  2474. p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
  2475. p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
  2476. p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
  2477. p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
  2478. p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
  2479. if (IS_FWI2_CAPABLE(ha)) {
  2480. p->lip_count = le32_to_cpu(stats->lip_cnt);
  2481. p->tx_frames = le32_to_cpu(stats->tx_frames);
  2482. p->rx_frames = le32_to_cpu(stats->rx_frames);
  2483. p->dumped_frames = le32_to_cpu(stats->discarded_frames);
  2484. p->nos_count = le32_to_cpu(stats->nos_rcvd);
  2485. p->error_frames =
  2486. le32_to_cpu(stats->dropped_frames) +
  2487. le32_to_cpu(stats->discarded_frames);
  2488. if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
  2489. p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
  2490. p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
  2491. } else {
  2492. p->rx_words = ib >> 2;
  2493. p->tx_words = ob >> 2;
  2494. }
  2495. }
  2496. p->fcp_control_requests = vha->qla_stats.control_requests;
  2497. p->fcp_input_requests = ir;
  2498. p->fcp_output_requests = or;
  2499. p->fcp_input_megabytes = ib >> 20;
  2500. p->fcp_output_megabytes = ob >> 20;
  2501. p->seconds_since_last_reset =
  2502. get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
  2503. do_div(p->seconds_since_last_reset, HZ);
  2504. done_free:
  2505. dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
  2506. stats, stats_dma);
  2507. done:
  2508. return p;
  2509. }
  2510. static void
  2511. qla2x00_reset_host_stats(struct Scsi_Host *shost)
  2512. {
  2513. scsi_qla_host_t *vha = shost_priv(shost);
  2514. struct qla_hw_data *ha = vha->hw;
  2515. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  2516. struct link_statistics *stats;
  2517. dma_addr_t stats_dma;
  2518. int i;
  2519. struct qla_qpair *qpair;
  2520. memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
  2521. memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
  2522. for (i = 0; i < vha->hw->max_qpairs; i++) {
  2523. qpair = vha->hw->queue_pair_map[i];
  2524. if (!qpair)
  2525. continue;
  2526. memset(&qpair->counters, 0, sizeof(qpair->counters));
  2527. }
  2528. memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters));
  2529. vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
  2530. if (IS_FWI2_CAPABLE(ha)) {
  2531. int rval;
  2532. stats = dma_alloc_coherent(&ha->pdev->dev,
  2533. sizeof(*stats), &stats_dma, GFP_KERNEL);
  2534. if (!stats) {
  2535. ql_log(ql_log_warn, vha, 0x70d7,
  2536. "Failed to allocate memory for stats.\n");
  2537. return;
  2538. }
  2539. /* reset firmware statistics */
  2540. rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
  2541. if (rval != QLA_SUCCESS)
  2542. ql_log(ql_log_warn, vha, 0x70de,
  2543. "Resetting ISP statistics failed: rval = %d\n",
  2544. rval);
  2545. dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
  2546. stats, stats_dma);
  2547. }
  2548. }
  2549. static void
  2550. qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
  2551. {
  2552. scsi_qla_host_t *vha = shost_priv(shost);
  2553. qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
  2554. sizeof(fc_host_symbolic_name(shost)));
  2555. }
  2556. static void
  2557. qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
  2558. {
  2559. scsi_qla_host_t *vha = shost_priv(shost);
  2560. set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
  2561. }
  2562. static void
  2563. qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
  2564. {
  2565. scsi_qla_host_t *vha = shost_priv(shost);
  2566. static const uint8_t node_name[WWN_SIZE] = {
  2567. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
  2568. };
  2569. u64 fabric_name = wwn_to_u64(node_name);
  2570. if (vha->device_flags & SWITCH_FOUND)
  2571. fabric_name = wwn_to_u64(vha->fabric_node_name);
  2572. fc_host_fabric_name(shost) = fabric_name;
  2573. }
  2574. static void
  2575. qla2x00_get_host_port_state(struct Scsi_Host *shost)
  2576. {
  2577. scsi_qla_host_t *vha = shost_priv(shost);
  2578. struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
  2579. if (!base_vha->flags.online) {
  2580. fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
  2581. return;
  2582. }
  2583. switch (atomic_read(&base_vha->loop_state)) {
  2584. case LOOP_UPDATE:
  2585. fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
  2586. break;
  2587. case LOOP_DOWN:
  2588. if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
  2589. fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
  2590. else
  2591. fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
  2592. break;
  2593. case LOOP_DEAD:
  2594. fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
  2595. break;
  2596. case LOOP_READY:
  2597. fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
  2598. break;
  2599. default:
  2600. fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
  2601. break;
  2602. }
  2603. }
  2604. static int
  2605. qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
  2606. {
  2607. int ret = 0;
  2608. uint8_t qos = 0;
  2609. scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
  2610. scsi_qla_host_t *vha = NULL;
  2611. struct qla_hw_data *ha = base_vha->hw;
  2612. int cnt;
  2613. struct req_que *req = ha->req_q_map[0];
  2614. struct qla_qpair *qpair;
  2615. ret = qla24xx_vport_create_req_sanity_check(fc_vport);
  2616. if (ret) {
  2617. ql_log(ql_log_warn, vha, 0x707e,
  2618. "Vport sanity check failed, status %x\n", ret);
  2619. return (ret);
  2620. }
  2621. vha = qla24xx_create_vhost(fc_vport);
  2622. if (vha == NULL) {
  2623. ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
  2624. return FC_VPORT_FAILED;
  2625. }
  2626. if (disable) {
  2627. atomic_set(&vha->vp_state, VP_OFFLINE);
  2628. fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
  2629. } else
  2630. atomic_set(&vha->vp_state, VP_FAILED);
  2631. /* ready to create vport */
  2632. ql_log(ql_log_info, vha, 0x7080,
  2633. "VP entry id %d assigned.\n", vha->vp_idx);
2634. /* Initialize vport states. */
  2635. atomic_set(&vha->loop_state, LOOP_DOWN);
  2636. vha->vp_err_state = VP_ERR_PORTDWN;
  2637. vha->vp_prev_err_state = VP_ERR_UNKWN;
  2638. /* Check if physical ha port is Up */
  2639. if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
  2640. atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
  2641. /* Don't retry or attempt login of this virtual port */
  2642. ql_dbg(ql_dbg_user, vha, 0x7081,
  2643. "Vport loop state is not UP.\n");
  2644. atomic_set(&vha->loop_state, LOOP_DEAD);
  2645. if (!disable)
  2646. fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
  2647. }
  2648. if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
  2649. if (ha->fw_attributes & BIT_4) {
  2650. int prot = 0, guard;
  2651. vha->flags.difdix_supported = 1;
  2652. ql_dbg(ql_dbg_user, vha, 0x7082,
  2653. "Registered for DIF/DIX type 1 and 3 protection.\n");
  2654. scsi_host_set_prot(vha->host,
  2655. prot | SHOST_DIF_TYPE1_PROTECTION
  2656. | SHOST_DIF_TYPE2_PROTECTION
  2657. | SHOST_DIF_TYPE3_PROTECTION
  2658. | SHOST_DIX_TYPE1_PROTECTION
  2659. | SHOST_DIX_TYPE2_PROTECTION
  2660. | SHOST_DIX_TYPE3_PROTECTION);
  2661. guard = SHOST_DIX_GUARD_CRC;
  2662. if (IS_PI_IPGUARD_CAPABLE(ha) &&
  2663. (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
  2664. guard |= SHOST_DIX_GUARD_IP;
  2665. scsi_host_set_guard(vha->host, guard);
  2666. } else
  2667. vha->flags.difdix_supported = 0;
  2668. }
  2669. if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
  2670. &ha->pdev->dev)) {
  2671. ql_dbg(ql_dbg_user, vha, 0x7083,
  2672. "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
  2673. goto vport_create_failed_2;
  2674. }
  2675. /* initialize attributes */
  2676. fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
  2677. fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
  2678. fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
  2679. fc_host_supported_classes(vha->host) =
  2680. fc_host_supported_classes(base_vha->host);
  2681. fc_host_supported_speeds(vha->host) =
  2682. fc_host_supported_speeds(base_vha->host);
  2683. qlt_vport_create(vha, ha);
  2684. qla24xx_vport_disable(fc_vport, disable);
  2685. if (!ql2xmqsupport || !ha->npiv_info)
  2686. goto vport_queue;
  2687. /* Create a request queue in QoS mode for the vport */
  2688. for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
  2689. if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
  2690. && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
  2691. 8) == 0) {
  2692. qos = ha->npiv_info[cnt].q_qos;
  2693. break;
  2694. }
  2695. }
  2696. if (qos) {
  2697. qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
  2698. if (!qpair)
  2699. ql_log(ql_log_warn, vha, 0x7084,
  2700. "Can't create qpair for VP[%d]\n",
  2701. vha->vp_idx);
  2702. else {
  2703. ql_dbg(ql_dbg_multiq, vha, 0xc001,
  2704. "Queue pair: %d Qos: %d) created for VP[%d]\n",
  2705. qpair->id, qos, vha->vp_idx);
  2706. ql_dbg(ql_dbg_user, vha, 0x7085,
  2707. "Queue Pair: %d Qos: %d) created for VP[%d]\n",
  2708. qpair->id, qos, vha->vp_idx);
  2709. req = qpair->req;
  2710. vha->qpair = qpair;
  2711. }
  2712. }
  2713. vport_queue:
  2714. vha->req = req;
  2715. return 0;
  2716. vport_create_failed_2:
  2717. qla24xx_disable_vp(vha);
  2718. qla24xx_deallocate_vp_id(vha);
  2719. scsi_host_put(vha->host);
  2720. return FC_VPORT_FAILED;
  2721. }
  2722. static int
  2723. qla24xx_vport_delete(struct fc_vport *fc_vport)
  2724. {
  2725. scsi_qla_host_t *vha = fc_vport->dd_data;
  2726. struct qla_hw_data *ha = vha->hw;
  2727. uint16_t id = vha->vp_idx;
  2728. set_bit(VPORT_DELETE, &vha->dpc_flags);
  2729. while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
  2730. test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
  2731. msleep(1000);
  2732. qla24xx_disable_vp(vha);
  2733. qla2x00_wait_for_sess_deletion(vha);
  2734. qla_nvme_delete(vha);
  2735. qla_enode_stop(vha);
  2736. qla_edb_stop(vha);
  2737. vha->flags.delete_progress = 1;
  2738. qlt_remove_target(ha, vha);
  2739. fc_remove_host(vha->host);
  2740. scsi_remove_host(vha->host);
  2741. /* Allow timer to run to drain queued items, when removing vp */
  2742. qla24xx_deallocate_vp_id(vha);
  2743. if (vha->timer_active) {
  2744. qla2x00_vp_stop_timer(vha);
  2745. ql_dbg(ql_dbg_user, vha, 0x7086,
  2746. "Timer for the VP[%d] has stopped\n", vha->vp_idx);
  2747. }
  2748. qla2x00_free_fcports(vha);
  2749. mutex_lock(&ha->vport_lock);
  2750. ha->cur_vport_count--;
  2751. clear_bit(vha->vp_idx, ha->vp_idx_map);
  2752. mutex_unlock(&ha->vport_lock);
  2753. dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
  2754. vha->gnl.ldma);
  2755. vha->gnl.l = NULL;
  2756. vfree(vha->scan.l);
  2757. if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
  2758. if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
  2759. ql_log(ql_log_warn, vha, 0x7087,
  2760. "Queue Pair delete failed.\n");
  2761. }
  2762. ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
  2763. scsi_host_put(vha->host);
  2764. return 0;
  2765. }
  2766. static int
  2767. qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
  2768. {
  2769. scsi_qla_host_t *vha = fc_vport->dd_data;
  2770. if (disable)
  2771. qla24xx_disable_vp(vha);
  2772. else
  2773. qla24xx_enable_vp(vha);
  2774. return 0;
  2775. }

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};
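
/*
 * Editor's note -- illustrative sketch, not part of this file.  A
 * fc_function_template such as qla2xxx_transport_functions is normally
 * registered once with the FC transport class at module init via
 * fc_attach_transport(), and the resulting scsi_transport_template is hung
 * off each Scsi_Host before scsi_add_host().  The pointer name below is an
 * assumption for illustration:
 *
 *	qla2xxx_transport_template =
 *	    fc_attach_transport(&qla2xxx_transport_functions);
 *	if (!qla2xxx_transport_template)
 *		return -ENODEV;
 *	...
 *	host->transportt = qla2xxx_transport_template;
 *
 * fc_release_transport() undoes the registration on module exit.
 */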

struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};
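
/*
 * Editor's note -- illustrative sketch, not part of this file.  This second
 * template is intended for NPIV vport hosts: it mirrors the physical-port
 * template above but drops the vport management callbacks (vport_create,
 * vport_disable, vport_delete) and show_host_supported_speeds, since an
 * NPIV vport cannot itself create further vports.  It would be attached the
 * same way, with an assumed pointer name:
 *
 *	qla2xxx_transport_vport_template =
 *	    fc_attach_transport(&qla2xxx_transport_vport_functions);
 *	if (!qla2xxx_transport_vport_template)
 *		return -ENODEV;
 */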

static uint
qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
{
	uint supported_speeds = FC_PORTSPEED_UNKNOWN;

	if (speeds & FDMI_PORT_SPEED_64GB)
		supported_speeds |= FC_PORTSPEED_64GBIT;
	if (speeds & FDMI_PORT_SPEED_32GB)
		supported_speeds |= FC_PORTSPEED_32GBIT;
	if (speeds & FDMI_PORT_SPEED_16GB)
		supported_speeds |= FC_PORTSPEED_16GBIT;
	if (speeds & FDMI_PORT_SPEED_8GB)
		supported_speeds |= FC_PORTSPEED_8GBIT;
	if (speeds & FDMI_PORT_SPEED_4GB)
		supported_speeds |= FC_PORTSPEED_4GBIT;
	if (speeds & FDMI_PORT_SPEED_2GB)
		supported_speeds |= FC_PORTSPEED_2GBIT;
	if (speeds & FDMI_PORT_SPEED_1GB)
		supported_speeds |= FC_PORTSPEED_1GBIT;

	return supported_speeds;
}
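
/*
 * Editor's note -- a worked example of the mapping above, for illustration
 * only.  Given an FDMI capability mask of, say,
 * (FDMI_PORT_SPEED_32GB | FDMI_PORT_SPEED_16GB | FDMI_PORT_SPEED_8GB),
 * the helper returns
 * (FC_PORTSPEED_UNKNOWN | FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
 *  FC_PORTSPEED_8GBIT).
 * The FC_PORTSPEED_UNKNOWN seed bit is never cleared, so a mask with no
 * recognized FDMI bits still reports "unknown" rather than zero.
 */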

void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speeds = 0, fdmi_speed = 0;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;

	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
	speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);

	fc_host_supported_speeds(vha->host) = speeds;
}
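
/*
 * Editor's note -- for context only.  The fc_host_*() setters used above are
 * the scsi_transport_fc.h accessor macros that write into the FC transport's
 * per-Scsi_Host attribute storage; the transport class then exposes those
 * values read-only under /sys/class/fc_host/hostN/ (node_name, port_name,
 * supported_classes, supported_speeds, max_npiv_vports, npiv_vports_inuse,
 * ...).  For example, "cat /sys/class/fc_host/host0/port_name" prints the
 * 64-bit WWPN stored via fc_host_port_name() above.  The function is
 * expected to run once per host during probe, after the FDMI speed
 * capability has been read from firmware, so the sysfs view matches what
 * qla25xx_fdmi_port_speed_capability() reported.
 */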