/* mpt3sas_ctl.c */
  1. /*
  2. * Management Module Support for MPT (Message Passing Technology) based
  3. * controllers
  4. *
  5. * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
  6. * Copyright (C) 2012-2014 LSI Corporation
  7. * Copyright (C) 2013-2014 Avago Technologies
  8. * (mailto: [email protected])
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version 2
  13. * of the License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * NO WARRANTY
  21. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  22. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  23. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  24. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  25. * solely responsible for determining the appropriateness of using and
  26. * distributing the Program and assumes all risks associated with its
  27. * exercise of rights under this Agreement, including but not limited to
  28. * the risks and costs of program errors, damage to or loss of data,
  29. * programs or equipment, and unavailability or interruption of operations.
  30. * DISCLAIMER OF LIABILITY
  31. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38. * You should have received a copy of the GNU General Public License
  39. * along with this program; if not, write to the Free Software
  40. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  41. * USA.
  42. */
  43. #include <linux/kernel.h>
  44. #include <linux/module.h>
  45. #include <linux/errno.h>
  46. #include <linux/init.h>
  47. #include <linux/slab.h>
  48. #include <linux/types.h>
  49. #include <linux/pci.h>
  50. #include <linux/delay.h>
  51. #include <linux/compat.h>
  52. #include <linux/poll.h>
  53. #include <linux/io.h>
  54. #include <linux/uaccess.h>
  55. #include "mpt3sas_base.h"
  56. #include "mpt3sas_ctl.h"
  57. static struct fasync_struct *async_queue;
  58. static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking; the ioctl path must not sleep
 * @BLOCKING: blocking; the ioctl path may sleep waiting for firmware
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};
/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame (may be NULL when no reply is available)
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.  Decodes the request frame at @smid into a short
 * human-readable description, then (when a reply is present) prints
 * IOC status/loginfo and, for SCSI I/O functions, the addressed
 * device's identity and SCSI state/status.
 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	/* Silent unless MPT_DEBUG_IOCTL logging is enabled. */
	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	/* Map the MPI function code to a printable description. */
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		/* Low nibble of IoFlags carries the CDB length. */
		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	case MPI2_FUNCTION_TOOLBOX:
		desc = "toolbox";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		desc = "nvme_encapsulated";
		break;
	}

	/* Unrecognized functions are not logged at all. */
	if (!desc)
		return;

	ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);

	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
			 le16_to_cpu(mpi_reply->IOCStatus),
			 le32_to_cpu(mpi_reply->IOCLogInfo));

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		struct _pcie_device *pcie_device = NULL;

		/* Identify the target: try SAS first, then PCIe (NVMe). */
		sas_device = mpt3sas_get_sdev_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address,
				 sas_device->phy);
			ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
			sas_device_put(sas_device);
		}
		if (!sas_device) {
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
			    le16_to_cpu(scsi_reply->DevHandle));
			if (pcie_device) {
				ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
					 (unsigned long long)pcie_device->wwid,
					 pcie_device->port_num);
				if (pcie_device->enclosure_handle != 0)
					ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
						 (u64)pcie_device->enclosure_logical_id,
						 pcie_device->slot);
				pcie_device_put(pcie_device);
			}
		}
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
				 scsi_reply->SCSIState,
				 scsi_reply->SCSIStatus);
	}
}
/**
 * mpt3sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.  Copies the reply
 * frame (and any sense / NVMe error-response data) into ioc->ctl_cmds
 * and wakes the waiter via complete().
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
	const void *sense_data;
	u32 sz;

	/* Ignore completions that are not for the outstanding ctl command. */
	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence the *4. */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				/* Clamp to the sense buffer capacity. */
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt3sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
		/*
		 * Get Error Response data for NVMe device. The ctl_cmds.sense
		 * buffer is used to store the Error Response data.
		 */
		if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
			nvme_error_reply =
			    (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
			sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
			    le16_to_cpu(nvme_error_reply->ErrorResponseCount));
			sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
			memcpy(ioc->ctl_cmds.sense, sense_data, sz);
		}
	}
	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
	return 1;
}
  285. /**
  286. * _ctl_check_event_type - determines when an event needs logging
  287. * @ioc: per adapter object
  288. * @event: firmware event
  289. *
  290. * The bitmask in ioc->event_type[] indicates which events should be
  291. * be saved in the driver event_log. This bitmask is set by application.
  292. *
  293. * Return: 1 when event should be captured, or zero means no match.
  294. */
  295. static int
  296. _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
  297. {
  298. u16 i;
  299. u32 desired_event;
  300. if (event >= 128 || !event || !ioc->event_log)
  301. return 0;
  302. desired_event = (1 << (event % 32));
  303. if (!desired_event)
  304. desired_event = 1;
  305. i = event / 32;
  306. return desired_event & ioc->event_type[i];
  307. }
  308. /**
  309. * mpt3sas_ctl_add_to_event_log - add event
  310. * @ioc: per adapter object
  311. * @mpi_reply: reply message frame
  312. */
  313. void
  314. mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
  315. Mpi2EventNotificationReply_t *mpi_reply)
  316. {
  317. struct MPT3_IOCTL_EVENTS *event_log;
  318. u16 event;
  319. int i;
  320. u32 sz, event_data_sz;
  321. u8 send_aen = 0;
  322. if (!ioc->event_log)
  323. return;
  324. event = le16_to_cpu(mpi_reply->Event);
  325. if (_ctl_check_event_type(ioc, event)) {
  326. /* insert entry into circular event_log */
  327. i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
  328. event_log = ioc->event_log;
  329. event_log[i].event = event;
  330. event_log[i].context = ioc->event_context++;
  331. event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
  332. sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
  333. memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
  334. memcpy(event_log[i].data, mpi_reply->EventData, sz);
  335. send_aen = 1;
  336. }
  337. /* This aen_event_read_flag flag is set until the
  338. * application has read the event log.
  339. * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
  340. */
  341. if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
  342. (send_aen && !ioc->aen_event_read_flag)) {
  343. ioc->aen_event_read_flag = 1;
  344. wake_up_interruptible(&ctl_poll_wait);
  345. if (async_queue)
  346. kill_fasync(&async_queue, SIGIO, POLL_IN);
  347. }
  348. }
  349. /**
  350. * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
  351. * @ioc: per adapter object
  352. * @msix_index: MSIX table index supplied by the OS
  353. * @reply: reply message frame(lower 32bit addr)
  354. * Context: interrupt.
  355. *
  356. * This function merely adds a new work task into ioc->firmware_event_thread.
  357. * The tasks are worked from _firmware_event_work in user context.
  358. *
  359. * Return: 1 meaning mf should be freed from _base_interrupt
  360. * 0 means the mf is freed from this function.
  361. */
  362. u8
  363. mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
  364. u32 reply)
  365. {
  366. Mpi2EventNotificationReply_t *mpi_reply;
  367. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  368. if (mpi_reply)
  369. mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
  370. return 1;
  371. }
  372. /**
  373. * _ctl_verify_adapter - validates ioc_number passed from application
  374. * @ioc_number: ?
  375. * @iocpp: The ioc pointer is returned in this.
  376. * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
  377. * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
  378. *
  379. * Return: (-1) means error, else ioc_number.
  380. */
  381. static int
  382. _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
  383. int mpi_version)
  384. {
  385. struct MPT3SAS_ADAPTER *ioc;
  386. int version = 0;
  387. /* global ioc lock to protect controller on list operations */
  388. spin_lock(&gioc_lock);
  389. list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
  390. if (ioc->id != ioc_number)
  391. continue;
  392. /* Check whether this ioctl command is from right
  393. * ioctl device or not, if not continue the search.
  394. */
  395. version = ioc->hba_mpi_version_belonged;
  396. /* MPI25_VERSION and MPI26_VERSION uses same ioctl
  397. * device.
  398. */
  399. if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
  400. if ((version == MPI25_VERSION) ||
  401. (version == MPI26_VERSION))
  402. goto out;
  403. else
  404. continue;
  405. } else {
  406. if (version != mpi_version)
  407. continue;
  408. }
  409. out:
  410. spin_unlock(&gioc_lock);
  411. *iocpp = ioc;
  412. return ioc_number;
  413. }
  414. spin_unlock(&gioc_lock);
  415. *iocpp = NULL;
  416. return -1;
  417. }
  418. /**
  419. * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
  420. * @ioc: per adapter object
  421. *
  422. * The handler for doing any required cleanup or initialization.
  423. */
  424. void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
  425. {
  426. int i;
  427. u8 issue_reset;
  428. dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
  429. for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
  430. if (!(ioc->diag_buffer_status[i] &
  431. MPT3_DIAG_BUFFER_IS_REGISTERED))
  432. continue;
  433. if ((ioc->diag_buffer_status[i] &
  434. MPT3_DIAG_BUFFER_IS_RELEASED))
  435. continue;
  436. /*
  437. * add a log message to indicate the release
  438. */
  439. ioc_info(ioc,
  440. "%s: Releasing the trace buffer due to adapter reset.",
  441. __func__);
  442. ioc->htb_rel.buffer_rel_condition =
  443. MPT3_DIAG_BUFFER_REL_TRIGGER;
  444. mpt3sas_send_diag_release(ioc, i, &issue_reset);
  445. }
  446. }
  447. /**
  448. * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
  449. * @ioc: per adapter object
  450. *
  451. * The handler for doing any required cleanup or initialization.
  452. */
  453. void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
  454. {
  455. dtmprintk(ioc,
  456. ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
  457. if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
  458. ioc->ctl_cmds.status |= MPT3_CMD_RESET;
  459. mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
  460. complete(&ioc->ctl_cmds.done);
  461. }
  462. }
  463. /**
  464. * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
  465. * @ioc: per adapter object
  466. *
  467. * The handler for doing any required cleanup or initialization.
  468. */
  469. void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
  470. {
  471. int i;
  472. dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
  473. for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
  474. if (!(ioc->diag_buffer_status[i] &
  475. MPT3_DIAG_BUFFER_IS_REGISTERED))
  476. continue;
  477. if ((ioc->diag_buffer_status[i] &
  478. MPT3_DIAG_BUFFER_IS_RELEASED))
  479. continue;
  480. ioc->diag_buffer_status[i] |=
  481. MPT3_DIAG_BUFFER_IS_DIAG_RESET;
  482. }
  483. }
/**
 * _ctl_fasync - fasync handler for the ctl character device
 * @fd: file descriptor (as passed by the VFS fasync machinery)
 * @filep: file object of the opened ctl device
 * @mode: non-zero to add, zero to remove async notification
 *
 * Called when an application requests asynchronous (SIGIO) notification
 * via fcntl(F_SETFL, ...|FASYNC).  Simply delegates to fasync_helper(),
 * which maintains the module-global async_queue.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}
  497. /**
  498. * _ctl_poll -
  499. * @filep: ?
  500. * @wait: ?
  501. *
  502. */
  503. static __poll_t
  504. _ctl_poll(struct file *filep, poll_table *wait)
  505. {
  506. struct MPT3SAS_ADAPTER *ioc;
  507. poll_wait(filep, &ctl_poll_wait, wait);
  508. /* global ioc lock to protect controller on list operations */
  509. spin_lock(&gioc_lock);
  510. list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
  511. if (ioc->aen_event_read_flag) {
  512. spin_unlock(&gioc_lock);
  513. return EPOLLIN | EPOLLRDNORM;
  514. }
  515. }
  516. spin_unlock(&gioc_lock);
  517. return 0;
  518. }
/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @tm_request: pointer to mf from user space
 *
 * Only ABORT_TASK and QUERY_TASK need a TaskMID; other task types return
 * 0 immediately.
 *
 * Return: 0 when an smid is found, else 1.
 * During failure, a synthetic reply frame is built and copied to user
 * space so the application still receives a well-formed TM reply.
 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	bool found = false;
	u16 smid;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	/*
	 * Scan all outstanding SCSI IO trackers from the highest smid down,
	 * looking for a command on the same device handle and LUN.
	 */
	for (smid = ioc->scsiio_depth; smid && !found; smid--) {
		struct scsiio_tracker *st;
		__le16 task_mid;

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		st = scsi_cmd_priv(scmd);

		/*
		 * If the given TaskMID from the user space is zero, then the
		 * first outstanding smid will be picked up. Otherwise,
		 * targeted smid will be the one.
		 */
		task_mid = cpu_to_le16(st->smid);
		if (!tm_request->TaskMID)
			tm_request->TaskMID = task_mid;
		found = tm_request->TaskMID == task_mid;
	}

	if (!found) {
		dctlprintk(ioc,
		    ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
			desc, le16_to_cpu(tm_request->DevHandle),
			lun));
		/*
		 * No matching outstanding command: hand-build a TM reply in
		 * the driver's reply buffer and copy it out to the caller.
		 */
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc,
	    ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
		desc, le16_to_cpu(tm_request->DevHandle), lun,
		le16_to_cpu(tm_request->TaskMID)));
	return 0;
}
/**
 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @mf: pointer to mf in user space
 *
 * Copies an MPI request frame from user space, allocates any DMA data
 * buffers it needs, dispatches the frame to the firmware according to its
 * Function code, waits for completion, and copies reply/data/sense back
 * to user space.  On firmware timeout a target or host reset is issued.
 *
 * Return: 0 on success, negative errno on failure.
 */
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
	void __user *mf)
{
	MPI2RequestHeader_t *mpi_request = NULL, *request;
	MPI2DefaultReply_t *mpi_reply;
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 smid;
	unsigned long timeout;
	u8 issue_reset;
	u32 sz, sz_arg;
	void *psge;
	void *data_out = NULL;
	dma_addr_t data_out_dma = 0;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma = 0;
	size_t data_in_sz = 0;
	long ret;
	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	issue_reset = 0;

	/* only one ioctl-issued command may be outstanding per adapter */
	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		ret = -EAGAIN;
		goto out;
	}

	ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
	if (ret)
		goto out;

	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
	if (!mpi_request) {
		ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
			__func__);
		ret = -ENOMEM;
		goto out;
	}

	/* Check for overflow and wraparound */
	if (karg.data_sge_offset * 4 > ioc->request_sz ||
	    karg.data_sge_offset > (UINT_MAX / 4)) {
		ret = -EINVAL;
		goto out;
	}

	/* copy in request message frame from user */
	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
		    __func__);
		ret = -EFAULT;
		goto out;
	}

	/* TM requests get a high-priority smid; all else share one slot */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
		if (!smid) {
			ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
			ret = -EAGAIN;
			goto out;
		}
	} else {
		/* Use first reserved smid for passthrough ioctls */
		smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	}

	ret = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(request, 0, ioc->request_sz);
	memcpy(request, mpi_request, karg.data_sge_offset*4);
	ioc->ctl_cmds.smid = smid;
	data_out_sz = karg.data_out_size;
	data_in_sz = karg.data_in_size;

	/* device-addressed functions need a valid DevHandle up front */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
	    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
		device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
		if (!device_handle || (device_handle >
		    ioc->facts.MaxDevHandle)) {
			ret = -EINVAL;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* obtain dma-able memory for data transfer */
	if (data_out_sz) /* WRITE */ {
		data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
				&data_out_dma, GFP_KERNEL);
		if (!data_out) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
		if (copy_from_user(data_out, karg.data_out_buf_ptr,
			data_out_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -EFAULT;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	if (data_in_sz) /* READ */ {
		data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
				&data_in_dma, GFP_KERNEL);
		if (!data_in) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* SGL/PRP location within the frame, as told by the application */
	psge = (void *)request + (karg.data_sge_offset*4);

	/* send command to firmware */
	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);

	init_completion(&ioc->ctl_cmds.done);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
	{
		nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
		if (!ioc->pcie_sg_lookup) {
			dtmprintk(ioc, ioc_info(ioc,
			    "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
			    ));
			if (ioc->logging_level & MPT_DEBUG_TM)
				_debug_dump_mf(nvme_encap_request,
				    ioc->request_sz/4);
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		/*
		 * Get the Physical Address of the sense buffer.
		 * Use Error Response buffer address field to hold the sense
		 * buffer address.
		 * Clear the internal sense buffer, which will potentially hold
		 * the Completion Queue Entry on return, or 0 if no Entry.
		 * Build the PRPs and set direction bits.
		 * Send the request.
		 */
		nvme_encap_request->ErrorResponseBaseAddress =
		    cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
		nvme_encap_request->ErrorResponseBaseAddress |=
		   cpu_to_le64(le32_to_cpu(
		   mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
		nvme_encap_request->ErrorResponseAllocationLength =
					cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
		memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc,
			    ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
				device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		mpt3sas_base_put_smid_nvme_encap(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsiio_request =
		    (Mpi2SCSIIORequest_t *)request;
		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
		scsiio_request->SenseBufferLowAddress =
		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc,
			    ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
				device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
			ioc->put_smid_scsi_io(ioc, smid, device_handle);
		else
			ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
	{
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)request;

		dtmprintk(ioc,
		    ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
			le16_to_cpu(tm_request->DevHandle),
			tm_request->TaskType));
		ioc->got_task_abort_from_ioctl = 1;
		/* abort/query need an active TaskMID resolved first */
		if (tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
		    tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
				mpt3sas_base_free_smid(ioc, smid);
				ioc->got_task_abort_from_ioctl = 0;
				goto out;
			}
		}
		ioc->got_task_abort_from_ioctl = 0;

		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc,
			    ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
				device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		ioc->put_smid_hi_priority(ioc, smid, 0);
		break;
	}
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
	{
		Mpi2SmpPassthroughRequest_t *smp_request =
		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
		u8 *data;

		if (!ioc->multipath_on_hba) {
			/* ioc determines which port to use */
			smp_request->PhysicalPort = 0xFF;
		}
		if (smp_request->PassthroughFlags &
		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
			data = (u8 *)&smp_request->SGL;
		else {
			if (unlikely(data_out == NULL)) {
				pr_err("failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				mpt3sas_base_free_smid(ioc, smid);
				ret = -EINVAL;
				goto out;
			}
			data = data_out;
		}

		/*
		 * NOTE(review): 0x91/1-or-2 appears to detect a phy
		 * hard/link-reset SMP frame — confirm against the SAS spec.
		 */
		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
	{
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc,
			    ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
				device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_FW_DOWNLOAD:
	{
		if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
			ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
			ret = -EPERM;
			break;
		}
		fallthrough;
	}
	case MPI2_FUNCTION_FW_UPLOAD:
	{
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_TOOLBOX:
	{
		Mpi2ToolboxCleanRequest_t *toolbox_request =
		    (Mpi2ToolboxCleanRequest_t *)mpi_request;

		if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
		    || (toolbox_request->Tool ==
		    MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		else if (toolbox_request->Tool ==
				MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
			Mpi2ToolboxMemMoveRequest_t *mem_move_request =
					(Mpi2ToolboxMemMoveRequest_t *)request;
			Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;

			ioc->build_sg_mpi(ioc, psge, data_out_dma,
					data_out_sz, data_in_dma, data_in_sz);
			/*
			 * Write-only memory-move: swap the two simple SGEs so
			 * the source/destination ordering matches what the
			 * firmware expects.
			 */
			if (data_out_sz && !data_in_sz) {
				dst =
				    (Mpi2SGESimple64_t *)&mem_move_request->SGL;
				src = (void *)dst + ioc->sge_size;

				memcpy(&tmp, src, ioc->sge_size);
				memcpy(src, dst, ioc->sge_size);
				memcpy(dst, &tmp, ioc->sge_size);
			}
			if (ioc->logging_level & MPT_DEBUG_TM) {
				ioc_info(ioc,
				    "Mpi2ToolboxMemMoveRequest_t request msg\n");
				_debug_dump_mf(mem_move_request,
							ioc->request_sz/4);
			}
		} else
			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
	{
		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;

		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
		    || sasiounit_request->Operation ==
		    MPI2_SAS_OP_PHY_LINK_RESET) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		/* drop to default case for posting the request */
	}
		fallthrough;
	default:
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}

	/* clamp the user timeout to the driver minimum */
	if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
	else
		timeout = karg.timeout;
	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
		mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
	    ioc->ioc_link_reset_in_progress) {
		ioc->ioc_link_reset_in_progress = 0;
		ioc->ignore_loginfos = 0;
	}
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): mpt3sas_check_cmd_timeout appears to set
		 * issue_reset (likely a macro taking it by name) — confirm.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    karg.data_sge_offset, issue_reset);
		goto issue_host_reset;
	}

	mpi_reply = ioc->ctl_cmds.reply;

	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
	    (ioc->logging_level & MPT_DEBUG_TM)) {
		Mpi2SCSITaskManagementReply_t *tm_reply =
		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;

		ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
			 le16_to_cpu(tm_reply->IOCStatus),
			 le32_to_cpu(tm_reply->IOCLogInfo),
			 le32_to_cpu(tm_reply->TerminationCount));
	}

	/* copy out xdata to user */
	if (data_in_sz) {
		if (copy_to_user(karg.data_in_buf_ptr, data_in,
		    data_in_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out reply message frame to user */
	if (karg.max_reply_bytes) {
		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out sense/NVMe Error Response to user */
	if (karg.max_sense_bytes && (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
		if (karg.sense_data_ptr == NULL) {
			ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
			goto out;
		}
		sz_arg = (mpi_request->Function ==
		    MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
							SCSI_SENSE_BUFFERSIZE;
		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

 issue_host_reset:
	if (issue_reset) {
		ret = -ENODATA;
		/* device-addressed I/O: try a target reset before giving up */
		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_request->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
			ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
				 le16_to_cpu(mpi_request->FunctionDependent1));
			mpt3sas_halt_firmware(ioc);
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
				le16_to_cpu(mpi_request->FunctionDependent1));
			if (pcie_device && (!ioc->tm_custom_handling) &&
			    (!(mpt3sas_scsih_is_pcie_scsi_device(
			    pcie_device->device_info))))
				mpt3sas_scsih_issue_locked_tm(ioc,
				  le16_to_cpu(mpi_request->FunctionDependent1),
				  0, 0, 0,
				  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
				  0, pcie_device->reset_timeout,
			MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
			else
				mpt3sas_scsih_issue_locked_tm(ioc,
				  le16_to_cpu(mpi_request->FunctionDependent1),
				  0, 0, 0,
				  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
				  0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
		} else
			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	}

 out:
	if (pcie_device)
		pcie_device_put(pcie_device);

	/* free memory associated with sg buffers */
	if (data_in)
		dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
		    data_out_dma);

	kfree(mpi_request);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return ret;
}
  1063. /**
  1064. * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
  1065. * @ioc: per adapter object
  1066. * @arg: user space buffer containing ioctl content
  1067. */
  1068. static long
  1069. _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1070. {
  1071. struct mpt3_ioctl_iocinfo karg;
  1072. dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
  1073. __func__));
  1074. memset(&karg, 0 , sizeof(karg));
  1075. if (ioc->pfacts)
  1076. karg.port_number = ioc->pfacts[0].PortNumber;
  1077. karg.hw_rev = ioc->pdev->revision;
  1078. karg.pci_id = ioc->pdev->device;
  1079. karg.subsystem_device = ioc->pdev->subsystem_device;
  1080. karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
  1081. karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
  1082. karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
  1083. karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
  1084. karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
  1085. karg.firmware_version = ioc->facts.FWVersion.Word;
  1086. strcpy(karg.driver_version, ioc->driver_name);
  1087. strcat(karg.driver_version, "-");
  1088. switch (ioc->hba_mpi_version_belonged) {
  1089. case MPI2_VERSION:
  1090. if (ioc->is_warpdrive)
  1091. karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
  1092. else
  1093. karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
  1094. strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
  1095. break;
  1096. case MPI25_VERSION:
  1097. case MPI26_VERSION:
  1098. if (ioc->is_gen35_ioc)
  1099. karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
  1100. else
  1101. karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
  1102. strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
  1103. break;
  1104. }
  1105. karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
  1106. if (copy_to_user(arg, &karg, sizeof(karg))) {
  1107. pr_err("failure at %s:%d/%s()!\n",
  1108. __FILE__, __LINE__, __func__);
  1109. return -EFAULT;
  1110. }
  1111. return 0;
  1112. }
  1113. /**
  1114. * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
  1115. * @ioc: per adapter object
  1116. * @arg: user space buffer containing ioctl content
  1117. */
  1118. static long
  1119. _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1120. {
  1121. struct mpt3_ioctl_eventquery karg;
  1122. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1123. pr_err("failure at %s:%d/%s()!\n",
  1124. __FILE__, __LINE__, __func__);
  1125. return -EFAULT;
  1126. }
  1127. dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
  1128. __func__));
  1129. karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
  1130. memcpy(karg.event_types, ioc->event_type,
  1131. MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
  1132. if (copy_to_user(arg, &karg, sizeof(karg))) {
  1133. pr_err("failure at %s:%d/%s()!\n",
  1134. __FILE__, __LINE__, __func__);
  1135. return -EFAULT;
  1136. }
  1137. return 0;
  1138. }
  1139. /**
  1140. * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
  1141. * @ioc: per adapter object
  1142. * @arg: user space buffer containing ioctl content
  1143. */
  1144. static long
  1145. _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1146. {
  1147. struct mpt3_ioctl_eventenable karg;
  1148. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1149. pr_err("failure at %s:%d/%s()!\n",
  1150. __FILE__, __LINE__, __func__);
  1151. return -EFAULT;
  1152. }
  1153. dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
  1154. __func__));
  1155. memcpy(ioc->event_type, karg.event_types,
  1156. MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
  1157. mpt3sas_base_validate_event_type(ioc, ioc->event_type);
  1158. if (ioc->event_log)
  1159. return 0;
  1160. /* initialize event_log */
  1161. ioc->event_context = 0;
  1162. ioc->aen_event_read_flag = 0;
  1163. ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
  1164. sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
  1165. if (!ioc->event_log) {
  1166. pr_err("failure at %s:%d/%s()!\n",
  1167. __FILE__, __LINE__, __func__);
  1168. return -ENOMEM;
  1169. }
  1170. return 0;
  1171. }
  1172. /**
  1173. * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
  1174. * @ioc: per adapter object
  1175. * @arg: user space buffer containing ioctl content
  1176. */
  1177. static long
  1178. _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1179. {
  1180. struct mpt3_ioctl_eventreport karg;
  1181. u32 number_bytes, max_events, max;
  1182. struct mpt3_ioctl_eventreport __user *uarg = arg;
  1183. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1184. pr_err("failure at %s:%d/%s()!\n",
  1185. __FILE__, __LINE__, __func__);
  1186. return -EFAULT;
  1187. }
  1188. dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
  1189. __func__));
  1190. number_bytes = karg.hdr.max_data_size -
  1191. sizeof(struct mpt3_ioctl_header);
  1192. max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
  1193. max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
  1194. /* If fewer than 1 event is requested, there must have
  1195. * been some type of error.
  1196. */
  1197. if (!max || !ioc->event_log)
  1198. return -ENODATA;
  1199. number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
  1200. if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
  1201. pr_err("failure at %s:%d/%s()!\n",
  1202. __FILE__, __LINE__, __func__);
  1203. return -EFAULT;
  1204. }
  1205. /* reset flag so SIGIO can restart */
  1206. ioc->aen_event_read_flag = 0;
  1207. return 0;
  1208. }
  1209. /**
  1210. * _ctl_do_reset - main handler for MPT3HARDRESET opcode
  1211. * @ioc: per adapter object
  1212. * @arg: user space buffer containing ioctl content
  1213. */
  1214. static long
  1215. _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1216. {
  1217. struct mpt3_ioctl_diag_reset karg;
  1218. int retval;
  1219. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1220. pr_err("failure at %s:%d/%s()!\n",
  1221. __FILE__, __LINE__, __func__);
  1222. return -EFAULT;
  1223. }
  1224. if (ioc->shost_recovery || ioc->pci_error_recovery ||
  1225. ioc->is_driver_loading)
  1226. return -EAGAIN;
  1227. dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
  1228. __func__));
  1229. ioc->reset_from_user = 1;
  1230. retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  1231. ioc_info(ioc,
  1232. "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
  1233. return 0;
  1234. }
  1235. /**
  1236. * _ctl_btdh_search_sas_device - searching for sas device
  1237. * @ioc: per adapter object
  1238. * @btdh: btdh ioctl payload
  1239. */
  1240. static int
  1241. _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
  1242. struct mpt3_ioctl_btdh_mapping *btdh)
  1243. {
  1244. struct _sas_device *sas_device;
  1245. unsigned long flags;
  1246. int rc = 0;
  1247. if (list_empty(&ioc->sas_device_list))
  1248. return rc;
  1249. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  1250. list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
  1251. if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
  1252. btdh->handle == sas_device->handle) {
  1253. btdh->bus = sas_device->channel;
  1254. btdh->id = sas_device->id;
  1255. rc = 1;
  1256. goto out;
  1257. } else if (btdh->bus == sas_device->channel && btdh->id ==
  1258. sas_device->id && btdh->handle == 0xFFFF) {
  1259. btdh->handle = sas_device->handle;
  1260. rc = 1;
  1261. goto out;
  1262. }
  1263. }
  1264. out:
  1265. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  1266. return rc;
  1267. }
  1268. /**
  1269. * _ctl_btdh_search_pcie_device - searching for pcie device
  1270. * @ioc: per adapter object
  1271. * @btdh: btdh ioctl payload
  1272. */
  1273. static int
  1274. _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
  1275. struct mpt3_ioctl_btdh_mapping *btdh)
  1276. {
  1277. struct _pcie_device *pcie_device;
  1278. unsigned long flags;
  1279. int rc = 0;
  1280. if (list_empty(&ioc->pcie_device_list))
  1281. return rc;
  1282. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1283. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
  1284. if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
  1285. btdh->handle == pcie_device->handle) {
  1286. btdh->bus = pcie_device->channel;
  1287. btdh->id = pcie_device->id;
  1288. rc = 1;
  1289. goto out;
  1290. } else if (btdh->bus == pcie_device->channel && btdh->id ==
  1291. pcie_device->id && btdh->handle == 0xFFFF) {
  1292. btdh->handle = pcie_device->handle;
  1293. rc = 1;
  1294. goto out;
  1295. }
  1296. }
  1297. out:
  1298. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1299. return rc;
  1300. }
  1301. /**
  1302. * _ctl_btdh_search_raid_device - searching for raid device
  1303. * @ioc: per adapter object
  1304. * @btdh: btdh ioctl payload
  1305. */
  1306. static int
  1307. _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
  1308. struct mpt3_ioctl_btdh_mapping *btdh)
  1309. {
  1310. struct _raid_device *raid_device;
  1311. unsigned long flags;
  1312. int rc = 0;
  1313. if (list_empty(&ioc->raid_device_list))
  1314. return rc;
  1315. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1316. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1317. if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
  1318. btdh->handle == raid_device->handle) {
  1319. btdh->bus = raid_device->channel;
  1320. btdh->id = raid_device->id;
  1321. rc = 1;
  1322. goto out;
  1323. } else if (btdh->bus == raid_device->channel && btdh->id ==
  1324. raid_device->id && btdh->handle == 0xFFFF) {
  1325. btdh->handle = raid_device->handle;
  1326. rc = 1;
  1327. goto out;
  1328. }
  1329. }
  1330. out:
  1331. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1332. return rc;
  1333. }
  1334. /**
  1335. * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
  1336. * @ioc: per adapter object
  1337. * @arg: user space buffer containing ioctl content
  1338. */
  1339. static long
  1340. _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1341. {
  1342. struct mpt3_ioctl_btdh_mapping karg;
  1343. int rc;
  1344. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1345. pr_err("failure at %s:%d/%s()!\n",
  1346. __FILE__, __LINE__, __func__);
  1347. return -EFAULT;
  1348. }
  1349. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  1350. __func__));
  1351. rc = _ctl_btdh_search_sas_device(ioc, &karg);
  1352. if (!rc)
  1353. rc = _ctl_btdh_search_pcie_device(ioc, &karg);
  1354. if (!rc)
  1355. _ctl_btdh_search_raid_device(ioc, &karg);
  1356. if (copy_to_user(arg, &karg, sizeof(karg))) {
  1357. pr_err("failure at %s:%d/%s()!\n",
  1358. __FILE__, __LINE__, __func__);
  1359. return -EFAULT;
  1360. }
  1361. return 0;
  1362. }
  1363. /**
  1364. * _ctl_diag_capability - return diag buffer capability
  1365. * @ioc: per adapter object
  1366. * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
  1367. *
  1368. * returns 1 when diag buffer support is enabled in firmware
  1369. */
  1370. static u8
  1371. _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
  1372. {
  1373. u8 rc = 0;
  1374. switch (buffer_type) {
  1375. case MPI2_DIAG_BUF_TYPE_TRACE:
  1376. if (ioc->facts.IOCCapabilities &
  1377. MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
  1378. rc = 1;
  1379. break;
  1380. case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
  1381. if (ioc->facts.IOCCapabilities &
  1382. MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
  1383. rc = 1;
  1384. break;
  1385. case MPI2_DIAG_BUF_TYPE_EXTENDED:
  1386. if (ioc->facts.IOCCapabilities &
  1387. MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
  1388. rc = 1;
  1389. }
  1390. return rc;
  1391. }
  1392. /**
  1393. * _ctl_diag_get_bufftype - return diag buffer type
  1394. * either TRACE, SNAPSHOT, or EXTENDED
  1395. * @ioc: per adapter object
  1396. * @unique_id: specifies the unique_id for the buffer
  1397. *
  1398. * returns MPT3_DIAG_UID_NOT_FOUND if the id not found
  1399. */
  1400. static u8
  1401. _ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
  1402. {
  1403. u8 index;
  1404. for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
  1405. if (ioc->unique_id[index] == unique_id)
  1406. return index;
  1407. }
  1408. return MPT3_DIAG_UID_NOT_FOUND;
  1409. }
  1410. /**
  1411. * _ctl_diag_register_2 - wrapper for registering diag buffer support
  1412. * @ioc: per adapter object
  1413. * @diag_register: the diag_register struct passed in from user space
  1414. *
  1415. */
  1416. static long
  1417. _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
  1418. struct mpt3_diag_register *diag_register)
  1419. {
  1420. int rc, i;
  1421. void *request_data = NULL;
  1422. dma_addr_t request_data_dma;
  1423. u32 request_data_sz = 0;
  1424. Mpi2DiagBufferPostRequest_t *mpi_request;
  1425. Mpi2DiagBufferPostReply_t *mpi_reply;
  1426. u8 buffer_type;
  1427. u16 smid;
  1428. u16 ioc_status;
  1429. u32 ioc_state;
  1430. u8 issue_reset = 0;
  1431. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  1432. __func__));
  1433. ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
  1434. if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
  1435. ioc_err(ioc, "%s: failed due to ioc not operational\n",
  1436. __func__);
  1437. rc = -EAGAIN;
  1438. goto out;
  1439. }
  1440. if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
  1441. ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
  1442. rc = -EAGAIN;
  1443. goto out;
  1444. }
  1445. buffer_type = diag_register->buffer_type;
  1446. if (!_ctl_diag_capability(ioc, buffer_type)) {
  1447. ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
  1448. __func__, buffer_type);
  1449. return -EPERM;
  1450. }
  1451. if (diag_register->unique_id == 0) {
  1452. ioc_err(ioc,
  1453. "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
  1454. diag_register->unique_id, buffer_type);
  1455. return -EINVAL;
  1456. }
  1457. if ((ioc->diag_buffer_status[buffer_type] &
  1458. MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
  1459. !(ioc->diag_buffer_status[buffer_type] &
  1460. MPT3_DIAG_BUFFER_IS_RELEASED)) {
  1461. ioc_err(ioc,
  1462. "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
  1463. __func__, buffer_type, ioc->unique_id[buffer_type]);
  1464. return -EINVAL;
  1465. }
  1466. if (ioc->diag_buffer_status[buffer_type] &
  1467. MPT3_DIAG_BUFFER_IS_REGISTERED) {
  1468. /*
  1469. * If driver posts buffer initially, then an application wants
  1470. * to Register that buffer (own it) without Releasing first,
  1471. * the application Register command MUST have the same buffer
  1472. * type and size in the Register command (obtained from the
  1473. * Query command). Otherwise that Register command will be
  1474. * failed. If the application has released the buffer but wants
  1475. * to re-register it, it should be allowed as long as the
  1476. * Unique-Id/Size match.
  1477. */
  1478. if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
  1479. ioc->diag_buffer_sz[buffer_type] ==
  1480. diag_register->requested_buffer_size) {
  1481. if (!(ioc->diag_buffer_status[buffer_type] &
  1482. MPT3_DIAG_BUFFER_IS_RELEASED)) {
  1483. dctlprintk(ioc, ioc_info(ioc,
  1484. "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
  1485. __func__, buffer_type,
  1486. ioc->unique_id[buffer_type],
  1487. diag_register->unique_id));
  1488. /*
  1489. * Application wants to own the buffer with
  1490. * the same size.
  1491. */
  1492. ioc->unique_id[buffer_type] =
  1493. diag_register->unique_id;
  1494. rc = 0; /* success */
  1495. goto out;
  1496. }
  1497. } else if (ioc->unique_id[buffer_type] !=
  1498. MPT3DIAGBUFFUNIQUEID) {
  1499. if (ioc->unique_id[buffer_type] !=
  1500. diag_register->unique_id ||
  1501. ioc->diag_buffer_sz[buffer_type] !=
  1502. diag_register->requested_buffer_size ||
  1503. !(ioc->diag_buffer_status[buffer_type] &
  1504. MPT3_DIAG_BUFFER_IS_RELEASED)) {
  1505. ioc_err(ioc,
  1506. "%s: already has a registered buffer for buffer_type(0x%02x)\n",
  1507. __func__, buffer_type);
  1508. return -EINVAL;
  1509. }
  1510. } else {
  1511. ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
  1512. __func__, buffer_type);
  1513. return -EINVAL;
  1514. }
  1515. } else if (ioc->diag_buffer_status[buffer_type] &
  1516. MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
  1517. if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
  1518. ioc->diag_buffer_sz[buffer_type] !=
  1519. diag_register->requested_buffer_size) {
  1520. ioc_err(ioc,
  1521. "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
  1522. __func__, buffer_type,
  1523. ioc->diag_buffer_sz[buffer_type]);
  1524. return -EINVAL;
  1525. }
  1526. }
  1527. if (diag_register->requested_buffer_size % 4) {
  1528. ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
  1529. __func__);
  1530. return -EINVAL;
  1531. }
  1532. smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
  1533. if (!smid) {
  1534. ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
  1535. rc = -EAGAIN;
  1536. goto out;
  1537. }
  1538. rc = 0;
  1539. ioc->ctl_cmds.status = MPT3_CMD_PENDING;
  1540. memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
  1541. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  1542. memset(mpi_request, 0, ioc->request_sz);
  1543. ioc->ctl_cmds.smid = smid;
  1544. request_data = ioc->diag_buffer[buffer_type];
  1545. request_data_sz = diag_register->requested_buffer_size;
  1546. ioc->unique_id[buffer_type] = diag_register->unique_id;
  1547. /* Reset ioc variables used for additional query commands */
  1548. ioc->reset_from_user = 0;
  1549. memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query));
  1550. ioc->diag_buffer_status[buffer_type] &=
  1551. MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
  1552. memcpy(ioc->product_specific[buffer_type],
  1553. diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
  1554. ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
  1555. if (request_data) {
  1556. request_data_dma = ioc->diag_buffer_dma[buffer_type];
  1557. if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
  1558. dma_free_coherent(&ioc->pdev->dev,
  1559. ioc->diag_buffer_sz[buffer_type],
  1560. request_data, request_data_dma);
  1561. request_data = NULL;
  1562. }
  1563. }
  1564. if (request_data == NULL) {
  1565. ioc->diag_buffer_sz[buffer_type] = 0;
  1566. ioc->diag_buffer_dma[buffer_type] = 0;
  1567. request_data = dma_alloc_coherent(&ioc->pdev->dev,
  1568. request_data_sz, &request_data_dma, GFP_KERNEL);
  1569. if (request_data == NULL) {
  1570. ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
  1571. __func__, request_data_sz);
  1572. mpt3sas_base_free_smid(ioc, smid);
  1573. rc = -ENOMEM;
  1574. goto out;
  1575. }
  1576. ioc->diag_buffer[buffer_type] = request_data;
  1577. ioc->diag_buffer_sz[buffer_type] = request_data_sz;
  1578. ioc->diag_buffer_dma[buffer_type] = request_data_dma;
  1579. }
  1580. mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
  1581. mpi_request->BufferType = diag_register->buffer_type;
  1582. mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
  1583. mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
  1584. mpi_request->BufferLength = cpu_to_le32(request_data_sz);
  1585. mpi_request->VF_ID = 0; /* TODO */
  1586. mpi_request->VP_ID = 0;
  1587. dctlprintk(ioc,
  1588. ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
  1589. __func__, request_data,
  1590. (unsigned long long)request_data_dma,
  1591. le32_to_cpu(mpi_request->BufferLength)));
  1592. for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
  1593. mpi_request->ProductSpecific[i] =
  1594. cpu_to_le32(ioc->product_specific[buffer_type][i]);
  1595. init_completion(&ioc->ctl_cmds.done);
  1596. ioc->put_smid_default(ioc, smid);
  1597. wait_for_completion_timeout(&ioc->ctl_cmds.done,
  1598. MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
  1599. if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
  1600. mpt3sas_check_cmd_timeout(ioc,
  1601. ioc->ctl_cmds.status, mpi_request,
  1602. sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
  1603. goto issue_host_reset;
  1604. }
  1605. /* process the completed Reply Message Frame */
  1606. if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
  1607. ioc_err(ioc, "%s: no reply message\n", __func__);
  1608. rc = -EFAULT;
  1609. goto out;
  1610. }
  1611. mpi_reply = ioc->ctl_cmds.reply;
  1612. ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
  1613. if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
  1614. ioc->diag_buffer_status[buffer_type] |=
  1615. MPT3_DIAG_BUFFER_IS_REGISTERED;
  1616. dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
  1617. } else {
  1618. ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
  1619. __func__,
  1620. ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
  1621. rc = -EFAULT;
  1622. }
  1623. issue_host_reset:
  1624. if (issue_reset)
  1625. mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  1626. out:
  1627. if (rc && request_data) {
  1628. dma_free_coherent(&ioc->pdev->dev, request_data_sz,
  1629. request_data, request_data_dma);
  1630. ioc->diag_buffer[buffer_type] = NULL;
  1631. ioc->diag_buffer_status[buffer_type] &=
  1632. ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
  1633. }
  1634. ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
  1635. return rc;
  1636. }
  1637. /**
  1638. * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
  1639. * @ioc: per adapter object
  1640. * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
  1641. *
  1642. * This is called when command line option diag_buffer_enable is enabled
  1643. * at driver load time.
  1644. */
  1645. void
  1646. mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
  1647. {
  1648. struct mpt3_diag_register diag_register;
  1649. u32 ret_val;
  1650. u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
  1651. u32 min_trace_buff_size = 0;
  1652. u32 decr_trace_buff_size = 0;
  1653. memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
  1654. if (bits_to_register & 1) {
  1655. ioc_info(ioc, "registering trace buffer support\n");
  1656. ioc->diag_trigger_master.MasterData =
  1657. (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
  1658. diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
  1659. diag_register.unique_id =
  1660. (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
  1661. (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
  1662. if (trace_buff_size != 0) {
  1663. diag_register.requested_buffer_size = trace_buff_size;
  1664. min_trace_buff_size =
  1665. ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
  1666. decr_trace_buff_size =
  1667. ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
  1668. if (min_trace_buff_size > trace_buff_size) {
  1669. /* The buff size is not set correctly */
  1670. ioc_err(ioc,
  1671. "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
  1672. min_trace_buff_size>>10,
  1673. trace_buff_size>>10);
  1674. ioc_err(ioc,
  1675. "Using zero Min Trace Buff Size\n");
  1676. min_trace_buff_size = 0;
  1677. }
  1678. if (decr_trace_buff_size == 0) {
  1679. /*
  1680. * retry the min size if decrement
  1681. * is not available.
  1682. */
  1683. decr_trace_buff_size =
  1684. trace_buff_size - min_trace_buff_size;
  1685. }
  1686. } else {
  1687. /* register for 2MB buffers */
  1688. diag_register.requested_buffer_size = 2 * (1024 * 1024);
  1689. }
  1690. do {
  1691. ret_val = _ctl_diag_register_2(ioc, &diag_register);
  1692. if (ret_val == -ENOMEM && min_trace_buff_size &&
  1693. (trace_buff_size - decr_trace_buff_size) >=
  1694. min_trace_buff_size) {
  1695. /* adjust the buffer size */
  1696. trace_buff_size -= decr_trace_buff_size;
  1697. diag_register.requested_buffer_size =
  1698. trace_buff_size;
  1699. } else
  1700. break;
  1701. } while (true);
  1702. if (ret_val == -ENOMEM)
  1703. ioc_err(ioc,
  1704. "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
  1705. diag_register.requested_buffer_size>>10);
  1706. else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
  1707. & MPT3_DIAG_BUFFER_IS_REGISTERED) {
  1708. ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
  1709. diag_register.requested_buffer_size>>10);
  1710. if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
  1711. ioc->diag_buffer_status[
  1712. MPI2_DIAG_BUF_TYPE_TRACE] |=
  1713. MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
  1714. }
  1715. }
  1716. if (bits_to_register & 2) {
  1717. ioc_info(ioc, "registering snapshot buffer support\n");
  1718. diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
  1719. /* register for 2MB buffers */
  1720. diag_register.requested_buffer_size = 2 * (1024 * 1024);
  1721. diag_register.unique_id = 0x7075901;
  1722. _ctl_diag_register_2(ioc, &diag_register);
  1723. }
  1724. if (bits_to_register & 4) {
  1725. ioc_info(ioc, "registering extended buffer support\n");
  1726. diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
  1727. /* register for 2MB buffers */
  1728. diag_register.requested_buffer_size = 2 * (1024 * 1024);
  1729. diag_register.unique_id = 0x7075901;
  1730. _ctl_diag_register_2(ioc, &diag_register);
  1731. }
  1732. }
  1733. /**
  1734. * _ctl_diag_register - application register with driver
  1735. * @ioc: per adapter object
  1736. * @arg: user space buffer containing ioctl content
  1737. *
  1738. * This will allow the driver to setup any required buffers that will be
  1739. * needed by firmware to communicate with the driver.
  1740. */
  1741. static long
  1742. _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1743. {
  1744. struct mpt3_diag_register karg;
  1745. long rc;
  1746. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1747. pr_err("failure at %s:%d/%s()!\n",
  1748. __FILE__, __LINE__, __func__);
  1749. return -EFAULT;
  1750. }
  1751. rc = _ctl_diag_register_2(ioc, &karg);
  1752. if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
  1753. MPT3_DIAG_BUFFER_IS_REGISTERED))
  1754. ioc->diag_buffer_status[karg.buffer_type] |=
  1755. MPT3_DIAG_BUFFER_IS_APP_OWNED;
  1756. return rc;
  1757. }
  1758. /**
  1759. * _ctl_diag_unregister - application unregister with driver
  1760. * @ioc: per adapter object
  1761. * @arg: user space buffer containing ioctl content
  1762. *
  1763. * This will allow the driver to cleanup any memory allocated for diag
  1764. * messages and to free up any resources.
  1765. */
  1766. static long
  1767. _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1768. {
  1769. struct mpt3_diag_unregister karg;
  1770. void *request_data;
  1771. dma_addr_t request_data_dma;
  1772. u32 request_data_sz;
  1773. u8 buffer_type;
  1774. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1775. pr_err("failure at %s:%d/%s()!\n",
  1776. __FILE__, __LINE__, __func__);
  1777. return -EFAULT;
  1778. }
  1779. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  1780. __func__));
  1781. buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
  1782. if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
  1783. ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
  1784. __func__, karg.unique_id);
  1785. return -EINVAL;
  1786. }
  1787. if (!_ctl_diag_capability(ioc, buffer_type)) {
  1788. ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
  1789. __func__, buffer_type);
  1790. return -EPERM;
  1791. }
  1792. if ((ioc->diag_buffer_status[buffer_type] &
  1793. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
  1794. ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
  1795. __func__, buffer_type);
  1796. return -EINVAL;
  1797. }
  1798. if ((ioc->diag_buffer_status[buffer_type] &
  1799. MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
  1800. ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
  1801. __func__, buffer_type);
  1802. return -EINVAL;
  1803. }
  1804. if (karg.unique_id != ioc->unique_id[buffer_type]) {
  1805. ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
  1806. __func__, karg.unique_id);
  1807. return -EINVAL;
  1808. }
  1809. request_data = ioc->diag_buffer[buffer_type];
  1810. if (!request_data) {
  1811. ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
  1812. __func__, buffer_type);
  1813. return -ENOMEM;
  1814. }
  1815. if (ioc->diag_buffer_status[buffer_type] &
  1816. MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
  1817. ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
  1818. ioc->diag_buffer_status[buffer_type] &=
  1819. ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
  1820. ioc->diag_buffer_status[buffer_type] &=
  1821. ~MPT3_DIAG_BUFFER_IS_REGISTERED;
  1822. } else {
  1823. request_data_sz = ioc->diag_buffer_sz[buffer_type];
  1824. request_data_dma = ioc->diag_buffer_dma[buffer_type];
  1825. dma_free_coherent(&ioc->pdev->dev, request_data_sz,
  1826. request_data, request_data_dma);
  1827. ioc->diag_buffer[buffer_type] = NULL;
  1828. ioc->diag_buffer_status[buffer_type] = 0;
  1829. }
  1830. return 0;
  1831. }
  1832. /**
  1833. * _ctl_diag_query - query relevant info associated with diag buffers
  1834. * @ioc: per adapter object
  1835. * @arg: user space buffer containing ioctl content
  1836. *
  1837. * The application will send only buffer_type and unique_id. Driver will
  1838. * inspect unique_id first, if valid, fill in all the info. If unique_id is
  1839. * 0x00, the driver will return info specified by Buffer Type.
  1840. */
  1841. static long
  1842. _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  1843. {
  1844. struct mpt3_diag_query karg;
  1845. void *request_data;
  1846. int i;
  1847. u8 buffer_type;
  1848. if (copy_from_user(&karg, arg, sizeof(karg))) {
  1849. pr_err("failure at %s:%d/%s()!\n",
  1850. __FILE__, __LINE__, __func__);
  1851. return -EFAULT;
  1852. }
  1853. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  1854. __func__));
  1855. karg.application_flags = 0;
  1856. buffer_type = karg.buffer_type;
  1857. if (!_ctl_diag_capability(ioc, buffer_type)) {
  1858. ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
  1859. __func__, buffer_type);
  1860. return -EPERM;
  1861. }
  1862. if (!(ioc->diag_buffer_status[buffer_type] &
  1863. MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
  1864. if ((ioc->diag_buffer_status[buffer_type] &
  1865. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
  1866. ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
  1867. __func__, buffer_type);
  1868. return -EINVAL;
  1869. }
  1870. }
  1871. if (karg.unique_id) {
  1872. if (karg.unique_id != ioc->unique_id[buffer_type]) {
  1873. ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
  1874. __func__, karg.unique_id);
  1875. return -EINVAL;
  1876. }
  1877. }
  1878. request_data = ioc->diag_buffer[buffer_type];
  1879. if (!request_data) {
  1880. ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
  1881. __func__, buffer_type);
  1882. return -ENOMEM;
  1883. }
  1884. if ((ioc->diag_buffer_status[buffer_type] &
  1885. MPT3_DIAG_BUFFER_IS_REGISTERED))
  1886. karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
  1887. if (!(ioc->diag_buffer_status[buffer_type] &
  1888. MPT3_DIAG_BUFFER_IS_RELEASED))
  1889. karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
  1890. if (!(ioc->diag_buffer_status[buffer_type] &
  1891. MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
  1892. karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
  1893. if ((ioc->diag_buffer_status[buffer_type] &
  1894. MPT3_DIAG_BUFFER_IS_APP_OWNED))
  1895. karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
  1896. for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
  1897. karg.product_specific[i] =
  1898. ioc->product_specific[buffer_type][i];
  1899. karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
  1900. karg.driver_added_buffer_size = 0;
  1901. karg.unique_id = ioc->unique_id[buffer_type];
  1902. karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
  1903. if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
  1904. ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
  1905. __func__, arg);
  1906. return -EFAULT;
  1907. }
  1908. return 0;
  1909. }
  1910. /**
  1911. * mpt3sas_send_diag_release - Diag Release Message
  1912. * @ioc: per adapter object
  1913. * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
  1914. * @issue_reset: specifies whether host reset is required.
  1915. *
  1916. */
  1917. int
  1918. mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
  1919. u8 *issue_reset)
  1920. {
  1921. Mpi2DiagReleaseRequest_t *mpi_request;
  1922. Mpi2DiagReleaseReply_t *mpi_reply;
  1923. u16 smid;
  1924. u16 ioc_status;
  1925. u32 ioc_state;
  1926. int rc;
  1927. u8 reset_needed = 0;
  1928. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  1929. __func__));
  1930. rc = 0;
  1931. *issue_reset = 0;
  1932. ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
  1933. if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
  1934. if (ioc->diag_buffer_status[buffer_type] &
  1935. MPT3_DIAG_BUFFER_IS_REGISTERED)
  1936. ioc->diag_buffer_status[buffer_type] |=
  1937. MPT3_DIAG_BUFFER_IS_RELEASED;
  1938. dctlprintk(ioc,
  1939. ioc_info(ioc, "%s: skipping due to FAULT state\n",
  1940. __func__));
  1941. rc = -EAGAIN;
  1942. goto out;
  1943. }
  1944. if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
  1945. ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
  1946. rc = -EAGAIN;
  1947. goto out;
  1948. }
  1949. smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
  1950. if (!smid) {
  1951. ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
  1952. rc = -EAGAIN;
  1953. goto out;
  1954. }
  1955. ioc->ctl_cmds.status = MPT3_CMD_PENDING;
  1956. memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
  1957. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  1958. memset(mpi_request, 0, ioc->request_sz);
  1959. ioc->ctl_cmds.smid = smid;
  1960. mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
  1961. mpi_request->BufferType = buffer_type;
  1962. mpi_request->VF_ID = 0; /* TODO */
  1963. mpi_request->VP_ID = 0;
  1964. init_completion(&ioc->ctl_cmds.done);
  1965. ioc->put_smid_default(ioc, smid);
  1966. wait_for_completion_timeout(&ioc->ctl_cmds.done,
  1967. MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
  1968. if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
  1969. mpt3sas_check_cmd_timeout(ioc,
  1970. ioc->ctl_cmds.status, mpi_request,
  1971. sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
  1972. *issue_reset = reset_needed;
  1973. rc = -EFAULT;
  1974. goto out;
  1975. }
  1976. /* process the completed Reply Message Frame */
  1977. if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
  1978. ioc_err(ioc, "%s: no reply message\n", __func__);
  1979. rc = -EFAULT;
  1980. goto out;
  1981. }
  1982. mpi_reply = ioc->ctl_cmds.reply;
  1983. ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
  1984. if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
  1985. ioc->diag_buffer_status[buffer_type] |=
  1986. MPT3_DIAG_BUFFER_IS_RELEASED;
  1987. dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
  1988. } else {
  1989. ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
  1990. __func__,
  1991. ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
  1992. rc = -EFAULT;
  1993. }
  1994. out:
  1995. ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
  1996. return rc;
  1997. }
  1998. /**
  1999. * _ctl_diag_release - request to send Diag Release Message to firmware
  2000. * @ioc: ?
  2001. * @arg: user space buffer containing ioctl content
  2002. *
  2003. * This allows ownership of the specified buffer to returned to the driver,
  2004. * allowing an application to read the buffer without fear that firmware is
  2005. * overwriting information in the buffer.
  2006. */
  2007. static long
  2008. _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  2009. {
  2010. struct mpt3_diag_release karg;
  2011. void *request_data;
  2012. int rc;
  2013. u8 buffer_type;
  2014. u8 issue_reset = 0;
  2015. if (copy_from_user(&karg, arg, sizeof(karg))) {
  2016. pr_err("failure at %s:%d/%s()!\n",
  2017. __FILE__, __LINE__, __func__);
  2018. return -EFAULT;
  2019. }
  2020. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  2021. __func__));
  2022. buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
  2023. if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
  2024. ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
  2025. __func__, karg.unique_id);
  2026. return -EINVAL;
  2027. }
  2028. if (!_ctl_diag_capability(ioc, buffer_type)) {
  2029. ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
  2030. __func__, buffer_type);
  2031. return -EPERM;
  2032. }
  2033. if ((ioc->diag_buffer_status[buffer_type] &
  2034. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
  2035. ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
  2036. __func__, buffer_type);
  2037. return -EINVAL;
  2038. }
  2039. if (karg.unique_id != ioc->unique_id[buffer_type]) {
  2040. ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
  2041. __func__, karg.unique_id);
  2042. return -EINVAL;
  2043. }
  2044. if (ioc->diag_buffer_status[buffer_type] &
  2045. MPT3_DIAG_BUFFER_IS_RELEASED) {
  2046. ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
  2047. __func__, buffer_type);
  2048. return -EINVAL;
  2049. }
  2050. request_data = ioc->diag_buffer[buffer_type];
  2051. if (!request_data) {
  2052. ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
  2053. __func__, buffer_type);
  2054. return -ENOMEM;
  2055. }
  2056. /* buffers were released by due to host reset */
  2057. if ((ioc->diag_buffer_status[buffer_type] &
  2058. MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
  2059. ioc->diag_buffer_status[buffer_type] |=
  2060. MPT3_DIAG_BUFFER_IS_RELEASED;
  2061. ioc->diag_buffer_status[buffer_type] &=
  2062. ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
  2063. ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
  2064. __func__, buffer_type);
  2065. return 0;
  2066. }
  2067. rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
  2068. if (issue_reset)
  2069. mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  2070. return rc;
  2071. }
  2072. /**
  2073. * _ctl_diag_read_buffer - request for copy of the diag buffer
  2074. * @ioc: per adapter object
  2075. * @arg: user space buffer containing ioctl content
  2076. */
  2077. static long
  2078. _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  2079. {
  2080. struct mpt3_diag_read_buffer karg;
  2081. struct mpt3_diag_read_buffer __user *uarg = arg;
  2082. void *request_data, *diag_data;
  2083. Mpi2DiagBufferPostRequest_t *mpi_request;
  2084. Mpi2DiagBufferPostReply_t *mpi_reply;
  2085. int rc, i;
  2086. u8 buffer_type;
  2087. unsigned long request_size, copy_size;
  2088. u16 smid;
  2089. u16 ioc_status;
  2090. u8 issue_reset = 0;
  2091. if (copy_from_user(&karg, arg, sizeof(karg))) {
  2092. pr_err("failure at %s:%d/%s()!\n",
  2093. __FILE__, __LINE__, __func__);
  2094. return -EFAULT;
  2095. }
  2096. dctlprintk(ioc, ioc_info(ioc, "%s\n",
  2097. __func__));
  2098. buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
  2099. if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
  2100. ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
  2101. __func__, karg.unique_id);
  2102. return -EINVAL;
  2103. }
  2104. if (!_ctl_diag_capability(ioc, buffer_type)) {
  2105. ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
  2106. __func__, buffer_type);
  2107. return -EPERM;
  2108. }
  2109. if (karg.unique_id != ioc->unique_id[buffer_type]) {
  2110. ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
  2111. __func__, karg.unique_id);
  2112. return -EINVAL;
  2113. }
  2114. request_data = ioc->diag_buffer[buffer_type];
  2115. if (!request_data) {
  2116. ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
  2117. __func__, buffer_type);
  2118. return -ENOMEM;
  2119. }
  2120. request_size = ioc->diag_buffer_sz[buffer_type];
  2121. if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
  2122. ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
  2123. __func__);
  2124. return -EINVAL;
  2125. }
  2126. if (karg.starting_offset > request_size)
  2127. return -EINVAL;
  2128. diag_data = (void *)(request_data + karg.starting_offset);
  2129. dctlprintk(ioc,
  2130. ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
  2131. __func__, diag_data, karg.starting_offset,
  2132. karg.bytes_to_read));
  2133. /* Truncate data on requests that are too large */
  2134. if ((diag_data + karg.bytes_to_read < diag_data) ||
  2135. (diag_data + karg.bytes_to_read > request_data + request_size))
  2136. copy_size = request_size - karg.starting_offset;
  2137. else
  2138. copy_size = karg.bytes_to_read;
  2139. if (copy_to_user((void __user *)uarg->diagnostic_data,
  2140. diag_data, copy_size)) {
  2141. ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
  2142. __func__, diag_data);
  2143. return -EFAULT;
  2144. }
  2145. if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
  2146. return 0;
  2147. dctlprintk(ioc,
  2148. ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
  2149. __func__, buffer_type));
  2150. if ((ioc->diag_buffer_status[buffer_type] &
  2151. MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
  2152. dctlprintk(ioc,
  2153. ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
  2154. __func__, buffer_type));
  2155. return 0;
  2156. }
  2157. /* Get a free request frame and save the message context.
  2158. */
  2159. if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
  2160. ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
  2161. rc = -EAGAIN;
  2162. goto out;
  2163. }
  2164. smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
  2165. if (!smid) {
  2166. ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
  2167. rc = -EAGAIN;
  2168. goto out;
  2169. }
  2170. rc = 0;
  2171. ioc->ctl_cmds.status = MPT3_CMD_PENDING;
  2172. memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
  2173. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  2174. memset(mpi_request, 0, ioc->request_sz);
  2175. ioc->ctl_cmds.smid = smid;
  2176. mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
  2177. mpi_request->BufferType = buffer_type;
  2178. mpi_request->BufferLength =
  2179. cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
  2180. mpi_request->BufferAddress =
  2181. cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
  2182. for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
  2183. mpi_request->ProductSpecific[i] =
  2184. cpu_to_le32(ioc->product_specific[buffer_type][i]);
  2185. mpi_request->VF_ID = 0; /* TODO */
  2186. mpi_request->VP_ID = 0;
  2187. init_completion(&ioc->ctl_cmds.done);
  2188. ioc->put_smid_default(ioc, smid);
  2189. wait_for_completion_timeout(&ioc->ctl_cmds.done,
  2190. MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
  2191. if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
  2192. mpt3sas_check_cmd_timeout(ioc,
  2193. ioc->ctl_cmds.status, mpi_request,
  2194. sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
  2195. goto issue_host_reset;
  2196. }
  2197. /* process the completed Reply Message Frame */
  2198. if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
  2199. ioc_err(ioc, "%s: no reply message\n", __func__);
  2200. rc = -EFAULT;
  2201. goto out;
  2202. }
  2203. mpi_reply = ioc->ctl_cmds.reply;
  2204. ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
  2205. if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
  2206. ioc->diag_buffer_status[buffer_type] |=
  2207. MPT3_DIAG_BUFFER_IS_REGISTERED;
  2208. ioc->diag_buffer_status[buffer_type] &=
  2209. ~MPT3_DIAG_BUFFER_IS_RELEASED;
  2210. dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
  2211. } else {
  2212. ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
  2213. __func__, ioc_status,
  2214. le32_to_cpu(mpi_reply->IOCLogInfo));
  2215. rc = -EFAULT;
  2216. }
  2217. issue_host_reset:
  2218. if (issue_reset)
  2219. mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  2220. out:
  2221. ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
  2222. return rc;
  2223. }
  2224. /**
  2225. * _ctl_addnl_diag_query - query relevant info associated with diag buffers
  2226. * @ioc: per adapter object
  2227. * @arg: user space buffer containing ioctl content
  2228. *
  2229. * The application will send only unique_id. Driver will
  2230. * inspect unique_id first, if valid, fill the details related to cause
  2231. * for diag buffer release.
  2232. */
  2233. static long
  2234. _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
  2235. {
  2236. struct mpt3_addnl_diag_query karg;
  2237. u32 buffer_type = 0;
  2238. if (copy_from_user(&karg, arg, sizeof(karg))) {
  2239. pr_err("%s: failure at %s:%d/%s()!\n",
  2240. ioc->name, __FILE__, __LINE__, __func__);
  2241. return -EFAULT;
  2242. }
  2243. dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
  2244. if (karg.unique_id == 0) {
  2245. ioc_err(ioc, "%s: unique_id is(0x%08x)\n",
  2246. __func__, karg.unique_id);
  2247. return -EPERM;
  2248. }
  2249. buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
  2250. if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
  2251. ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
  2252. __func__, karg.unique_id);
  2253. return -EPERM;
  2254. }
  2255. memset(&karg.rel_query, 0, sizeof(karg.rel_query));
  2256. if ((ioc->diag_buffer_status[buffer_type] &
  2257. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
  2258. ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
  2259. __func__, buffer_type);
  2260. goto out;
  2261. }
  2262. if ((ioc->diag_buffer_status[buffer_type] &
  2263. MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
  2264. ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n",
  2265. __func__, buffer_type);
  2266. return -EPERM;
  2267. }
  2268. memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
  2269. out:
  2270. if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
  2271. ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
  2272. __func__, arg);
  2273. return -EFAULT;
  2274. }
  2275. return 0;
  2276. }
#ifdef CONFIG_COMPAT
/**
 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
 * @ioc: per adapter object
 * @cmd: ioctl opcode
 * @arg: (struct mpt3_ioctl_command32)
 *
 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
 *
 * Widens a 32-bit userspace mpt3_ioctl_command32 into the native
 * mpt3_ioctl_command and forwards it to _ctl_do_mpt_command().
 */
static long
_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
	void __user *arg)
{
	struct mpt3_ioctl_command32 karg32;
	struct mpt3_ioctl_command32 __user *uarg;
	struct mpt3_ioctl_command karg;

	/* Caller must have passed exactly the 32-bit layout. */
	if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
		return -EINVAL;

	uarg = (struct mpt3_ioctl_command32 __user *) arg;

	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* Copy scalar fields one-for-one into the zeroed native struct. */
	memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
	karg.hdr.ioc_number = karg32.hdr.ioc_number;
	karg.hdr.port_number = karg32.hdr.port_number;
	karg.hdr.max_data_size = karg32.hdr.max_data_size;
	karg.timeout = karg32.timeout;
	karg.max_reply_bytes = karg32.max_reply_bytes;
	karg.data_in_size = karg32.data_in_size;
	karg.data_out_size = karg32.data_out_size;
	karg.max_sense_bytes = karg32.max_sense_bytes;
	karg.data_sge_offset = karg32.data_sge_offset;
	/* compat_ptr() rewrites the 32-bit user pointers as 64-bit ones. */
	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
	/* &uarg->mf: the MPI message frame still lives in userspace. */
	return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
}
#endif
  2318. /**
  2319. * _ctl_ioctl_main - main ioctl entry point
  2320. * @file: (struct file)
  2321. * @cmd: ioctl opcode
  2322. * @arg: user space data buffer
  2323. * @compat: handles 32 bit applications in 64bit os
  2324. * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
  2325. * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
  2326. */
  2327. static long
  2328. _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
  2329. u8 compat, u16 mpi_version)
  2330. {
  2331. struct MPT3SAS_ADAPTER *ioc;
  2332. struct mpt3_ioctl_header ioctl_header;
  2333. enum block_state state;
  2334. long ret = -ENOIOCTLCMD;
  2335. /* get IOCTL header */
  2336. if (copy_from_user(&ioctl_header, (char __user *)arg,
  2337. sizeof(struct mpt3_ioctl_header))) {
  2338. pr_err("failure at %s:%d/%s()!\n",
  2339. __FILE__, __LINE__, __func__);
  2340. return -EFAULT;
  2341. }
  2342. if (_ctl_verify_adapter(ioctl_header.ioc_number,
  2343. &ioc, mpi_version) == -1 || !ioc)
  2344. return -ENODEV;
  2345. /* pci_access_mutex lock acquired by ioctl path */
  2346. mutex_lock(&ioc->pci_access_mutex);
  2347. if (ioc->shost_recovery || ioc->pci_error_recovery ||
  2348. ioc->is_driver_loading || ioc->remove_host) {
  2349. ret = -EAGAIN;
  2350. goto out_unlock_pciaccess;
  2351. }
  2352. state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
  2353. if (state == NON_BLOCKING) {
  2354. if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
  2355. ret = -EAGAIN;
  2356. goto out_unlock_pciaccess;
  2357. }
  2358. } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
  2359. ret = -ERESTARTSYS;
  2360. goto out_unlock_pciaccess;
  2361. }
  2362. switch (cmd) {
  2363. case MPT3IOCINFO:
  2364. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
  2365. ret = _ctl_getiocinfo(ioc, arg);
  2366. break;
  2367. #ifdef CONFIG_COMPAT
  2368. case MPT3COMMAND32:
  2369. #endif
  2370. case MPT3COMMAND:
  2371. {
  2372. struct mpt3_ioctl_command __user *uarg;
  2373. struct mpt3_ioctl_command karg;
  2374. #ifdef CONFIG_COMPAT
  2375. if (compat) {
  2376. ret = _ctl_compat_mpt_command(ioc, cmd, arg);
  2377. break;
  2378. }
  2379. #endif
  2380. if (copy_from_user(&karg, arg, sizeof(karg))) {
  2381. pr_err("failure at %s:%d/%s()!\n",
  2382. __FILE__, __LINE__, __func__);
  2383. ret = -EFAULT;
  2384. break;
  2385. }
  2386. if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
  2387. ret = -EINVAL;
  2388. break;
  2389. }
  2390. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
  2391. uarg = arg;
  2392. ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
  2393. }
  2394. break;
  2395. }
  2396. case MPT3EVENTQUERY:
  2397. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
  2398. ret = _ctl_eventquery(ioc, arg);
  2399. break;
  2400. case MPT3EVENTENABLE:
  2401. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
  2402. ret = _ctl_eventenable(ioc, arg);
  2403. break;
  2404. case MPT3EVENTREPORT:
  2405. ret = _ctl_eventreport(ioc, arg);
  2406. break;
  2407. case MPT3HARDRESET:
  2408. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
  2409. ret = _ctl_do_reset(ioc, arg);
  2410. break;
  2411. case MPT3BTDHMAPPING:
  2412. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
  2413. ret = _ctl_btdh_mapping(ioc, arg);
  2414. break;
  2415. case MPT3DIAGREGISTER:
  2416. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
  2417. ret = _ctl_diag_register(ioc, arg);
  2418. break;
  2419. case MPT3DIAGUNREGISTER:
  2420. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
  2421. ret = _ctl_diag_unregister(ioc, arg);
  2422. break;
  2423. case MPT3DIAGQUERY:
  2424. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
  2425. ret = _ctl_diag_query(ioc, arg);
  2426. break;
  2427. case MPT3DIAGRELEASE:
  2428. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
  2429. ret = _ctl_diag_release(ioc, arg);
  2430. break;
  2431. case MPT3DIAGREADBUFFER:
  2432. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
  2433. ret = _ctl_diag_read_buffer(ioc, arg);
  2434. break;
  2435. case MPT3ADDNLDIAGQUERY:
  2436. if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
  2437. ret = _ctl_addnl_diag_query(ioc, arg);
  2438. break;
  2439. default:
  2440. dctlprintk(ioc,
  2441. ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
  2442. cmd));
  2443. break;
  2444. }
  2445. mutex_unlock(&ioc->ctl_cmds.mutex);
  2446. out_unlock_pciaccess:
  2447. mutex_unlock(&ioc->pci_access_mutex);
  2448. return ret;
  2449. }
  2450. /**
  2451. * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
  2452. * @file: (struct file)
  2453. * @cmd: ioctl opcode
  2454. * @arg: ?
  2455. */
  2456. static long
  2457. _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  2458. {
  2459. long ret;
  2460. /* pass MPI25_VERSION | MPI26_VERSION value,
  2461. * to indicate that this ioctl cmd
  2462. * came from mpt3ctl ioctl device.
  2463. */
  2464. ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
  2465. MPI25_VERSION | MPI26_VERSION);
  2466. return ret;
  2467. }
  2468. /**
  2469. * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
  2470. * @file: (struct file)
  2471. * @cmd: ioctl opcode
  2472. * @arg: ?
  2473. */
  2474. static long
  2475. _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  2476. {
  2477. long ret;
  2478. /* pass MPI2_VERSION value, to indicate that this ioctl cmd
  2479. * came from mpt2ctl ioctl device.
  2480. */
  2481. ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
  2482. return ret;
  2483. }
  2484. #ifdef CONFIG_COMPAT
  2485. /**
  2486. * _ctl_ioctl_compat - main ioctl entry point (compat)
  2487. * @file: ?
  2488. * @cmd: ?
  2489. * @arg: ?
  2490. *
  2491. * This routine handles 32 bit applications in 64bit os.
  2492. */
  2493. static long
  2494. _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
  2495. {
  2496. long ret;
  2497. ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
  2498. MPI25_VERSION | MPI26_VERSION);
  2499. return ret;
  2500. }
  2501. /**
  2502. * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
  2503. * @file: ?
  2504. * @cmd: ?
  2505. * @arg: ?
  2506. *
  2507. * This routine handles 32 bit applications in 64bit os.
  2508. */
  2509. static long
  2510. _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
  2511. {
  2512. long ret;
  2513. ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
  2514. return ret;
  2515. }
  2516. #endif
  2517. /* scsi host attributes */
  2518. /**
  2519. * version_fw_show - firmware version
  2520. * @cdev: pointer to embedded class device
  2521. * @attr: ?
  2522. * @buf: the buffer returned
  2523. *
  2524. * A sysfs 'read-only' shost attribute.
  2525. */
  2526. static ssize_t
  2527. version_fw_show(struct device *cdev, struct device_attribute *attr,
  2528. char *buf)
  2529. {
  2530. struct Scsi_Host *shost = class_to_shost(cdev);
  2531. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2532. return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
  2533. (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
  2534. (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
  2535. (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
  2536. ioc->facts.FWVersion.Word & 0x000000FF);
  2537. }
  2538. static DEVICE_ATTR_RO(version_fw);
  2539. /**
  2540. * version_bios_show - bios version
  2541. * @cdev: pointer to embedded class device
  2542. * @attr: ?
  2543. * @buf: the buffer returned
  2544. *
  2545. * A sysfs 'read-only' shost attribute.
  2546. */
  2547. static ssize_t
  2548. version_bios_show(struct device *cdev, struct device_attribute *attr,
  2549. char *buf)
  2550. {
  2551. struct Scsi_Host *shost = class_to_shost(cdev);
  2552. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2553. u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
  2554. return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
  2555. (version & 0xFF000000) >> 24,
  2556. (version & 0x00FF0000) >> 16,
  2557. (version & 0x0000FF00) >> 8,
  2558. version & 0x000000FF);
  2559. }
  2560. static DEVICE_ATTR_RO(version_bios);
  2561. /**
  2562. * version_mpi_show - MPI (message passing interface) version
  2563. * @cdev: pointer to embedded class device
  2564. * @attr: ?
  2565. * @buf: the buffer returned
  2566. *
  2567. * A sysfs 'read-only' shost attribute.
  2568. */
  2569. static ssize_t
  2570. version_mpi_show(struct device *cdev, struct device_attribute *attr,
  2571. char *buf)
  2572. {
  2573. struct Scsi_Host *shost = class_to_shost(cdev);
  2574. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2575. return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
  2576. ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
  2577. }
  2578. static DEVICE_ATTR_RO(version_mpi);
  2579. /**
  2580. * version_product_show - product name
  2581. * @cdev: pointer to embedded class device
  2582. * @attr: ?
  2583. * @buf: the buffer returned
  2584. *
  2585. * A sysfs 'read-only' shost attribute.
  2586. */
  2587. static ssize_t
  2588. version_product_show(struct device *cdev, struct device_attribute *attr,
  2589. char *buf)
  2590. {
  2591. struct Scsi_Host *shost = class_to_shost(cdev);
  2592. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2593. return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
  2594. }
  2595. static DEVICE_ATTR_RO(version_product);
  2596. /**
  2597. * version_nvdata_persistent_show - ndvata persistent version
  2598. * @cdev: pointer to embedded class device
  2599. * @attr: ?
  2600. * @buf: the buffer returned
  2601. *
  2602. * A sysfs 'read-only' shost attribute.
  2603. */
  2604. static ssize_t
  2605. version_nvdata_persistent_show(struct device *cdev,
  2606. struct device_attribute *attr, char *buf)
  2607. {
  2608. struct Scsi_Host *shost = class_to_shost(cdev);
  2609. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2610. return snprintf(buf, PAGE_SIZE, "%08xh\n",
  2611. le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
  2612. }
  2613. static DEVICE_ATTR_RO(version_nvdata_persistent);
  2614. /**
  2615. * version_nvdata_default_show - nvdata default version
  2616. * @cdev: pointer to embedded class device
  2617. * @attr: ?
  2618. * @buf: the buffer returned
  2619. *
  2620. * A sysfs 'read-only' shost attribute.
  2621. */
  2622. static ssize_t
  2623. version_nvdata_default_show(struct device *cdev, struct device_attribute
  2624. *attr, char *buf)
  2625. {
  2626. struct Scsi_Host *shost = class_to_shost(cdev);
  2627. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2628. return snprintf(buf, PAGE_SIZE, "%08xh\n",
  2629. le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
  2630. }
  2631. static DEVICE_ATTR_RO(version_nvdata_default);
  2632. /**
  2633. * board_name_show - board name
  2634. * @cdev: pointer to embedded class device
  2635. * @attr: ?
  2636. * @buf: the buffer returned
  2637. *
  2638. * A sysfs 'read-only' shost attribute.
  2639. */
  2640. static ssize_t
  2641. board_name_show(struct device *cdev, struct device_attribute *attr,
  2642. char *buf)
  2643. {
  2644. struct Scsi_Host *shost = class_to_shost(cdev);
  2645. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2646. return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
  2647. }
  2648. static DEVICE_ATTR_RO(board_name);
  2649. /**
  2650. * board_assembly_show - board assembly name
  2651. * @cdev: pointer to embedded class device
  2652. * @attr: ?
  2653. * @buf: the buffer returned
  2654. *
  2655. * A sysfs 'read-only' shost attribute.
  2656. */
  2657. static ssize_t
  2658. board_assembly_show(struct device *cdev, struct device_attribute *attr,
  2659. char *buf)
  2660. {
  2661. struct Scsi_Host *shost = class_to_shost(cdev);
  2662. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2663. return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
  2664. }
  2665. static DEVICE_ATTR_RO(board_assembly);
  2666. /**
  2667. * board_tracer_show - board tracer number
  2668. * @cdev: pointer to embedded class device
  2669. * @attr: ?
  2670. * @buf: the buffer returned
  2671. *
  2672. * A sysfs 'read-only' shost attribute.
  2673. */
  2674. static ssize_t
  2675. board_tracer_show(struct device *cdev, struct device_attribute *attr,
  2676. char *buf)
  2677. {
  2678. struct Scsi_Host *shost = class_to_shost(cdev);
  2679. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2680. return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
  2681. }
  2682. static DEVICE_ATTR_RO(board_tracer);
  2683. /**
  2684. * io_delay_show - io missing delay
  2685. * @cdev: pointer to embedded class device
  2686. * @attr: ?
  2687. * @buf: the buffer returned
  2688. *
  2689. * This is for firmware implemention for deboucing device
  2690. * removal events.
  2691. *
  2692. * A sysfs 'read-only' shost attribute.
  2693. */
  2694. static ssize_t
  2695. io_delay_show(struct device *cdev, struct device_attribute *attr,
  2696. char *buf)
  2697. {
  2698. struct Scsi_Host *shost = class_to_shost(cdev);
  2699. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2700. return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
  2701. }
  2702. static DEVICE_ATTR_RO(io_delay);
  2703. /**
  2704. * device_delay_show - device missing delay
  2705. * @cdev: pointer to embedded class device
  2706. * @attr: ?
  2707. * @buf: the buffer returned
  2708. *
  2709. * This is for firmware implemention for deboucing device
  2710. * removal events.
  2711. *
  2712. * A sysfs 'read-only' shost attribute.
  2713. */
  2714. static ssize_t
  2715. device_delay_show(struct device *cdev, struct device_attribute *attr,
  2716. char *buf)
  2717. {
  2718. struct Scsi_Host *shost = class_to_shost(cdev);
  2719. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2720. return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
  2721. }
  2722. static DEVICE_ATTR_RO(device_delay);
  2723. /**
  2724. * fw_queue_depth_show - global credits
  2725. * @cdev: pointer to embedded class device
  2726. * @attr: ?
  2727. * @buf: the buffer returned
  2728. *
  2729. * This is firmware queue depth limit
  2730. *
  2731. * A sysfs 'read-only' shost attribute.
  2732. */
  2733. static ssize_t
  2734. fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
  2735. char *buf)
  2736. {
  2737. struct Scsi_Host *shost = class_to_shost(cdev);
  2738. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2739. return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
  2740. }
  2741. static DEVICE_ATTR_RO(fw_queue_depth);
  2742. /**
  2743. * host_sas_address_show - sas address
  2744. * @cdev: pointer to embedded class device
  2745. * @attr: ?
  2746. * @buf: the buffer returned
  2747. *
  2748. * This is the controller sas address
  2749. *
  2750. * A sysfs 'read-only' shost attribute.
  2751. */
  2752. static ssize_t
  2753. host_sas_address_show(struct device *cdev, struct device_attribute *attr,
  2754. char *buf)
  2755. {
  2756. struct Scsi_Host *shost = class_to_shost(cdev);
  2757. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2758. return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
  2759. (unsigned long long)ioc->sas_hba.sas_address);
  2760. }
  2761. static DEVICE_ATTR_RO(host_sas_address);
  2762. /**
  2763. * logging_level_show - logging level
  2764. * @cdev: pointer to embedded class device
  2765. * @attr: ?
  2766. * @buf: the buffer returned
  2767. *
  2768. * A sysfs 'read/write' shost attribute.
  2769. */
  2770. static ssize_t
  2771. logging_level_show(struct device *cdev, struct device_attribute *attr,
  2772. char *buf)
  2773. {
  2774. struct Scsi_Host *shost = class_to_shost(cdev);
  2775. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2776. return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
  2777. }
  2778. static ssize_t
  2779. logging_level_store(struct device *cdev, struct device_attribute *attr,
  2780. const char *buf, size_t count)
  2781. {
  2782. struct Scsi_Host *shost = class_to_shost(cdev);
  2783. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2784. int val = 0;
  2785. if (sscanf(buf, "%x", &val) != 1)
  2786. return -EINVAL;
  2787. ioc->logging_level = val;
  2788. ioc_info(ioc, "logging_level=%08xh\n",
  2789. ioc->logging_level);
  2790. return strlen(buf);
  2791. }
  2792. static DEVICE_ATTR_RW(logging_level);
  2793. /**
  2794. * fwfault_debug_show - show/store fwfault_debug
  2795. * @cdev: pointer to embedded class device
  2796. * @attr: ?
  2797. * @buf: the buffer returned
  2798. *
  2799. * mpt3sas_fwfault_debug is command line option
  2800. * A sysfs 'read/write' shost attribute.
  2801. */
  2802. static ssize_t
  2803. fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
  2804. char *buf)
  2805. {
  2806. struct Scsi_Host *shost = class_to_shost(cdev);
  2807. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2808. return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
  2809. }
  2810. static ssize_t
  2811. fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
  2812. const char *buf, size_t count)
  2813. {
  2814. struct Scsi_Host *shost = class_to_shost(cdev);
  2815. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2816. int val = 0;
  2817. if (sscanf(buf, "%d", &val) != 1)
  2818. return -EINVAL;
  2819. ioc->fwfault_debug = val;
  2820. ioc_info(ioc, "fwfault_debug=%d\n",
  2821. ioc->fwfault_debug);
  2822. return strlen(buf);
  2823. }
  2824. static DEVICE_ATTR_RW(fwfault_debug);
  2825. /**
  2826. * ioc_reset_count_show - ioc reset count
  2827. * @cdev: pointer to embedded class device
  2828. * @attr: ?
  2829. * @buf: the buffer returned
  2830. *
  2831. * This is firmware queue depth limit
  2832. *
  2833. * A sysfs 'read-only' shost attribute.
  2834. */
  2835. static ssize_t
  2836. ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
  2837. char *buf)
  2838. {
  2839. struct Scsi_Host *shost = class_to_shost(cdev);
  2840. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2841. return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
  2842. }
  2843. static DEVICE_ATTR_RO(ioc_reset_count);
  2844. /**
  2845. * reply_queue_count_show - number of reply queues
  2846. * @cdev: pointer to embedded class device
  2847. * @attr: ?
  2848. * @buf: the buffer returned
  2849. *
  2850. * This is number of reply queues
  2851. *
  2852. * A sysfs 'read-only' shost attribute.
  2853. */
  2854. static ssize_t
  2855. reply_queue_count_show(struct device *cdev,
  2856. struct device_attribute *attr, char *buf)
  2857. {
  2858. u8 reply_queue_count;
  2859. struct Scsi_Host *shost = class_to_shost(cdev);
  2860. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2861. if ((ioc->facts.IOCCapabilities &
  2862. MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
  2863. reply_queue_count = ioc->reply_queue_count;
  2864. else
  2865. reply_queue_count = 1;
  2866. return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
  2867. }
  2868. static DEVICE_ATTR_RO(reply_queue_count);
  2869. /**
  2870. * BRM_status_show - Backup Rail Monitor Status
  2871. * @cdev: pointer to embedded class device
  2872. * @attr: ?
  2873. * @buf: the buffer returned
  2874. *
  2875. * This is number of reply queues
  2876. *
  2877. * A sysfs 'read-only' shost attribute.
  2878. */
  2879. static ssize_t
  2880. BRM_status_show(struct device *cdev, struct device_attribute *attr,
  2881. char *buf)
  2882. {
  2883. struct Scsi_Host *shost = class_to_shost(cdev);
  2884. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2885. Mpi2IOUnitPage3_t io_unit_pg3;
  2886. Mpi2ConfigReply_t mpi_reply;
  2887. u16 backup_rail_monitor_status = 0;
  2888. u16 ioc_status;
  2889. int sz;
  2890. ssize_t rc = 0;
  2891. if (!ioc->is_warpdrive) {
  2892. ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
  2893. __func__);
  2894. return 0;
  2895. }
  2896. /* pci_access_mutex lock acquired by sysfs show path */
  2897. mutex_lock(&ioc->pci_access_mutex);
  2898. if (ioc->pci_error_recovery || ioc->remove_host)
  2899. goto out;
  2900. sz = sizeof(io_unit_pg3);
  2901. memset(&io_unit_pg3, 0, sz);
  2902. if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
  2903. 0) {
  2904. ioc_err(ioc, "%s: failed reading iounit_pg3\n",
  2905. __func__);
  2906. rc = -EINVAL;
  2907. goto out;
  2908. }
  2909. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
  2910. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  2911. ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
  2912. __func__, ioc_status);
  2913. rc = -EINVAL;
  2914. goto out;
  2915. }
  2916. if (io_unit_pg3.GPIOCount < 25) {
  2917. ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
  2918. __func__, io_unit_pg3.GPIOCount);
  2919. rc = -EINVAL;
  2920. goto out;
  2921. }
  2922. /* BRM status is in bit zero of GPIOVal[24] */
  2923. backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
  2924. rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
  2925. out:
  2926. mutex_unlock(&ioc->pci_access_mutex);
  2927. return rc;
  2928. }
  2929. static DEVICE_ATTR_RO(BRM_status);
/*
 * Header the firmware writes at the start of a trace diag buffer.
 * Layout is firmware-defined; fields below mirror what
 * host_trace_buffer_size_show() reads (Size, DiagVersion, Reserved3
 * as a signature word) — NOTE(review): confirm against the FW spec.
 */
struct DIAG_BUFFER_START {
	__le32 Size;		/* valid payload size in bytes */
	__le32 DiagVersion;	/* diag header version */
	u8 BufferType;
	u8 Reserved[3];
	__le32 Reserved1;
	__le32 Reserved2;
	__le32 Reserved3;	/* used as a signature word by the driver */
};
  2939. /**
  2940. * host_trace_buffer_size_show - host buffer size (trace only)
  2941. * @cdev: pointer to embedded class device
  2942. * @attr: ?
  2943. * @buf: the buffer returned
  2944. *
  2945. * A sysfs 'read-only' shost attribute.
  2946. */
  2947. static ssize_t
  2948. host_trace_buffer_size_show(struct device *cdev,
  2949. struct device_attribute *attr, char *buf)
  2950. {
  2951. struct Scsi_Host *shost = class_to_shost(cdev);
  2952. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2953. u32 size = 0;
  2954. struct DIAG_BUFFER_START *request_data;
  2955. if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
  2956. ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
  2957. __func__);
  2958. return 0;
  2959. }
  2960. if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
  2961. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
  2962. ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
  2963. __func__);
  2964. return 0;
  2965. }
  2966. request_data = (struct DIAG_BUFFER_START *)
  2967. ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
  2968. if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
  2969. le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
  2970. le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
  2971. le32_to_cpu(request_data->Reserved3) == 0x4742444c)
  2972. size = le32_to_cpu(request_data->Size);
  2973. ioc->ring_buffer_sz = size;
  2974. return snprintf(buf, PAGE_SIZE, "%d\n", size);
  2975. }
  2976. static DEVICE_ATTR_RO(host_trace_buffer_size);
  2977. /**
  2978. * host_trace_buffer_show - firmware ring buffer (trace only)
  2979. * @cdev: pointer to embedded class device
  2980. * @attr: ?
  2981. * @buf: the buffer returned
  2982. *
  2983. * A sysfs 'read/write' shost attribute.
  2984. *
  2985. * You will only be able to read 4k bytes of ring buffer at a time.
  2986. * In order to read beyond 4k bytes, you will have to write out the
  2987. * offset to the same attribute, it will move the pointer.
  2988. */
  2989. static ssize_t
  2990. host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
  2991. char *buf)
  2992. {
  2993. struct Scsi_Host *shost = class_to_shost(cdev);
  2994. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  2995. void *request_data;
  2996. u32 size;
  2997. if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
  2998. ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
  2999. __func__);
  3000. return 0;
  3001. }
  3002. if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
  3003. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
  3004. ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
  3005. __func__);
  3006. return 0;
  3007. }
  3008. if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
  3009. return 0;
  3010. size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
  3011. size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
  3012. request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
  3013. memcpy(buf, request_data, size);
  3014. return size;
  3015. }
  3016. static ssize_t
  3017. host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
  3018. const char *buf, size_t count)
  3019. {
  3020. struct Scsi_Host *shost = class_to_shost(cdev);
  3021. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3022. int val = 0;
  3023. if (sscanf(buf, "%d", &val) != 1)
  3024. return -EINVAL;
  3025. ioc->ring_buffer_offset = val;
  3026. return strlen(buf);
  3027. }
  3028. static DEVICE_ATTR_RW(host_trace_buffer);
  3029. /*****************************************/
  3030. /**
  3031. * host_trace_buffer_enable_show - firmware ring buffer (trace only)
  3032. * @cdev: pointer to embedded class device
  3033. * @attr: ?
  3034. * @buf: the buffer returned
  3035. *
  3036. * A sysfs 'read/write' shost attribute.
  3037. *
* This is a mechanism to post/release host_trace_buffers
  3039. */
  3040. static ssize_t
  3041. host_trace_buffer_enable_show(struct device *cdev,
  3042. struct device_attribute *attr, char *buf)
  3043. {
  3044. struct Scsi_Host *shost = class_to_shost(cdev);
  3045. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3046. if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
  3047. ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
  3048. MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
  3049. return snprintf(buf, PAGE_SIZE, "off\n");
  3050. else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
  3051. MPT3_DIAG_BUFFER_IS_RELEASED))
  3052. return snprintf(buf, PAGE_SIZE, "release\n");
  3053. else
  3054. return snprintf(buf, PAGE_SIZE, "post\n");
  3055. }
/**
 * host_trace_buffer_enable_store - post or release the host trace buffer
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: "post" or "release"
 * @count: number of bytes written
 *
 * Writing "post" (re)registers a trace buffer with the firmware; writing
 * "release" asks the firmware to stop tracing so the contents can be read
 * via the host_trace_buffer attribute.
 */
static ssize_t
host_trace_buffer_enable_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	char str[10] = "";
	struct mpt3_diag_register diag_register;
	u8 issue_reset = 0;

	/* don't allow post/release to occur while recovery is active */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery || ioc->is_driver_loading)
		return -EBUSY;

	if (sscanf(buf, "%9s", str) != 1)
		return -EINVAL;

	if (!strcmp(str, "post")) {
		/* exit out if host buffers are already posted */
		if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
		    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
			goto out;
		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
		ioc_info(ioc, "posting host trace buffers\n");
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;

		if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
		    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
			/* post the same buffer allocated previously */
			diag_register.requested_buffer_size =
			    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
		} else {
			/*
			 * Free the diag buffer memory which was previously
			 * allocated by an application.
			 */
			if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
			    &&
			    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
			    MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
				dma_free_coherent(&ioc->pdev->dev,
				    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
				    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
				    ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
				ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
				    NULL;
			}
			/* fall back to a driver-allocated 1 MB buffer */
			diag_register.requested_buffer_size = (1024 * 1024);
		}

		diag_register.unique_id =
		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
		    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
		/* clear stale status before re-registering with firmware */
		ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
		_ctl_diag_register_2(ioc, &diag_register);
		if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) {
			ioc_info(ioc,
			    "Trace buffer %d KB allocated through sysfs\n",
			    diag_register.requested_buffer_size>>10);
			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
				ioc->diag_buffer_status[
				    MPI2_DIAG_BUF_TYPE_TRACE] |=
				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
		}
	} else if (!strcmp(str, "release")) {
		/* exit out if host buffers are already released */
		if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED))
			goto out;
		ioc_info(ioc, "releasing host trace buffer\n");
		/* record that the release was user-initiated via sysfs */
		ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
		    &issue_reset);
	}

 out:
	return strlen(buf);
}
static DEVICE_ATTR_RW(host_trace_buffer_enable);
/*********** diagnostic trigger support *********************************/
  3140. /**
  3141. * diag_trigger_master_show - show the diag_trigger_master attribute
  3142. * @cdev: pointer to embedded class device
  3143. * @attr: ?
  3144. * @buf: the buffer returned
  3145. *
  3146. * A sysfs 'read/write' shost attribute.
  3147. */
  3148. static ssize_t
  3149. diag_trigger_master_show(struct device *cdev,
  3150. struct device_attribute *attr, char *buf)
  3151. {
  3152. struct Scsi_Host *shost = class_to_shost(cdev);
  3153. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3154. unsigned long flags;
  3155. ssize_t rc;
  3156. spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
  3157. rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
  3158. memcpy(buf, &ioc->diag_trigger_master, rc);
  3159. spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
  3160. return rc;
  3161. }
  3162. /**
  3163. * diag_trigger_master_store - store the diag_trigger_master attribute
  3164. * @cdev: pointer to embedded class device
  3165. * @attr: ?
  3166. * @buf: the buffer returned
  3167. * @count: ?
  3168. *
  3169. * A sysfs 'read/write' shost attribute.
  3170. */
/**
 * diag_trigger_master_store - store the diag_trigger_master attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: buffer holding the new SL_WH_MASTER_TRIGGER_T data
 * @count: size of @buf
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_master_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_MASTER_TRIGGER_T *master_tg;
	unsigned long flags;
	ssize_t rc;
	bool set = 1;

	/* never copy more than the trigger structure can hold */
	rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);

	if (ioc->supports_trigger_pages) {
		master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
		    GFP_KERNEL);
		if (!master_tg)
			return -ENOMEM;

		memcpy(master_tg, buf, rc);
		/* zero MasterData clears the persistent trigger page */
		if (!master_tg->MasterData)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
		    set)) {
			kfree(master_tg);
			return -EFAULT;
		}
		kfree(master_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_master, 0,
	    sizeof(struct SL_WH_MASTER_TRIGGER_T));
	memcpy(&ioc->diag_trigger_master, buf, rc);
	/* FW-fault and adapter-reset triggers are always kept enabled */
	ioc->diag_trigger_master.MasterData |=
	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}
static DEVICE_ATTR_RW(diag_trigger_master);
  3207. /**
  3208. * diag_trigger_event_show - show the diag_trigger_event attribute
  3209. * @cdev: pointer to embedded class device
  3210. * @attr: ?
  3211. * @buf: the buffer returned
  3212. *
  3213. * A sysfs 'read/write' shost attribute.
  3214. */
  3215. static ssize_t
  3216. diag_trigger_event_show(struct device *cdev,
  3217. struct device_attribute *attr, char *buf)
  3218. {
  3219. struct Scsi_Host *shost = class_to_shost(cdev);
  3220. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3221. unsigned long flags;
  3222. ssize_t rc;
  3223. spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
  3224. rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
  3225. memcpy(buf, &ioc->diag_trigger_event, rc);
  3226. spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
  3227. return rc;
  3228. }
  3229. /**
  3230. * diag_trigger_event_store - store the diag_trigger_event attribute
  3231. * @cdev: pointer to embedded class device
  3232. * @attr: ?
  3233. * @buf: the buffer returned
  3234. * @count: ?
  3235. *
  3236. * A sysfs 'read/write' shost attribute.
  3237. */
/**
 * diag_trigger_event_store - store the diag_trigger_event attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: buffer holding the new SL_WH_EVENT_TRIGGERS_T data
 * @count: size of @buf
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_event_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_EVENT_TRIGGERS_T *event_tg;
	unsigned long flags;
	ssize_t sz;
	bool set = 1;

	/* never copy more than the trigger structure can hold */
	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);

	if (ioc->supports_trigger_pages) {
		event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
		    GFP_KERNEL);
		if (!event_tg)
			return -ENOMEM;

		memcpy(event_tg, buf, sz);
		/* an empty trigger list clears the persistent trigger page */
		if (!event_tg->ValidEntries)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
		    set)) {
			kfree(event_tg);
			return -EFAULT;
		}
		kfree(event_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_event, 0,
	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
	memcpy(&ioc->diag_trigger_event, buf, sz);
	/* clamp the entry count to the supported maximum */
	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR_RW(diag_trigger_event);
  3274. /**
  3275. * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
  3276. * @cdev: pointer to embedded class device
  3277. * @attr: ?
  3278. * @buf: the buffer returned
  3279. *
  3280. * A sysfs 'read/write' shost attribute.
  3281. */
  3282. static ssize_t
  3283. diag_trigger_scsi_show(struct device *cdev,
  3284. struct device_attribute *attr, char *buf)
  3285. {
  3286. struct Scsi_Host *shost = class_to_shost(cdev);
  3287. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3288. unsigned long flags;
  3289. ssize_t rc;
  3290. spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
  3291. rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
  3292. memcpy(buf, &ioc->diag_trigger_scsi, rc);
  3293. spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
  3294. return rc;
  3295. }
  3296. /**
  3297. * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
  3298. * @cdev: pointer to embedded class device
  3299. * @attr: ?
  3300. * @buf: the buffer returned
  3301. * @count: ?
  3302. *
  3303. * A sysfs 'read/write' shost attribute.
  3304. */
/**
 * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: buffer holding the new SL_WH_SCSI_TRIGGERS_T data
 * @count: size of @buf
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_scsi_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
	unsigned long flags;
	ssize_t sz;
	bool set = 1;

	/* never copy more than the trigger structure can hold */
	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);

	if (ioc->supports_trigger_pages) {
		scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
		    GFP_KERNEL);
		if (!scsi_tg)
			return -ENOMEM;

		memcpy(scsi_tg, buf, sz);
		/* an empty trigger list clears the persistent trigger page */
		if (!scsi_tg->ValidEntries)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
		    set)) {
			kfree(scsi_tg);
			return -EFAULT;
		}
		kfree(scsi_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
	memcpy(&ioc->diag_trigger_scsi, buf, sz);
	/* clamp the entry count to the supported maximum */
	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR_RW(diag_trigger_scsi);
  3340. /**
  3341. * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
  3342. * @cdev: pointer to embedded class device
  3343. * @attr: ?
  3344. * @buf: the buffer returned
  3345. *
  3346. * A sysfs 'read/write' shost attribute.
  3347. */
  3348. static ssize_t
  3349. diag_trigger_mpi_show(struct device *cdev,
  3350. struct device_attribute *attr, char *buf)
  3351. {
  3352. struct Scsi_Host *shost = class_to_shost(cdev);
  3353. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3354. unsigned long flags;
  3355. ssize_t rc;
  3356. spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
  3357. rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
  3358. memcpy(buf, &ioc->diag_trigger_mpi, rc);
  3359. spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
  3360. return rc;
  3361. }
  3362. /**
  3363. * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
  3364. * @cdev: pointer to embedded class device
  3365. * @attr: ?
  3366. * @buf: the buffer returned
  3367. * @count: ?
  3368. *
  3369. * A sysfs 'read/write' shost attribute.
  3370. */
/**
 * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: buffer holding the new SL_WH_MPI_TRIGGERS_T data
 * @count: size of @buf
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_mpi_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
	unsigned long flags;
	ssize_t sz;
	bool set = 1;

	/* never copy more than the trigger structure can hold */
	sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);

	if (ioc->supports_trigger_pages) {
		mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
		    GFP_KERNEL);
		if (!mpi_tg)
			return -ENOMEM;

		memcpy(mpi_tg, buf, sz);
		/* an empty trigger list clears the persistent trigger page */
		if (!mpi_tg->ValidEntries)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
		    set)) {
			kfree(mpi_tg);
			return -EFAULT;
		}
		kfree(mpi_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_mpi, 0,
	    sizeof(ioc->diag_trigger_mpi));
	memcpy(&ioc->diag_trigger_mpi, buf, sz);
	/* clamp the entry count to the supported maximum */
	if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR_RW(diag_trigger_mpi);
/*********** diagnostic trigger support *** END ****************************/
  3408. /*****************************************/
  3409. /**
  3410. * drv_support_bitmap_show - driver supported feature bitmap
  3411. * @cdev: pointer to embedded class device
  3412. * @attr: unused
  3413. * @buf: the buffer returned
  3414. *
  3415. * A sysfs 'read-only' shost attribute.
  3416. */
  3417. static ssize_t
  3418. drv_support_bitmap_show(struct device *cdev,
  3419. struct device_attribute *attr, char *buf)
  3420. {
  3421. struct Scsi_Host *shost = class_to_shost(cdev);
  3422. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3423. return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
  3424. }
  3425. static DEVICE_ATTR_RO(drv_support_bitmap);
  3426. /**
  3427. * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
  3428. * @cdev: pointer to embedded class device
  3429. * @attr: unused
  3430. * @buf: the buffer returned
  3431. *
  3432. * A sysfs read/write shost attribute. This attribute is used to set the
  3433. * targets queue depth to HBA IO queue depth if this attribute is enabled.
  3434. */
  3435. static ssize_t
  3436. enable_sdev_max_qd_show(struct device *cdev,
  3437. struct device_attribute *attr, char *buf)
  3438. {
  3439. struct Scsi_Host *shost = class_to_shost(cdev);
  3440. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3441. return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
  3442. }
  3443. /**
  3444. * enable_sdev_max_qd_store - Enable/disable sdev max qd
  3445. * @cdev: pointer to embedded class device
  3446. * @attr: unused
  3447. * @buf: the buffer returned
  3448. * @count: unused
  3449. *
  3450. * A sysfs read/write shost attribute. This attribute is used to set the
  3451. * targets queue depth to HBA IO queue depth if this attribute is enabled.
  3452. * If this attribute is disabled then targets will have corresponding default
  3453. * queue depth.
  3454. */
  3455. static ssize_t
  3456. enable_sdev_max_qd_store(struct device *cdev,
  3457. struct device_attribute *attr, const char *buf, size_t count)
  3458. {
  3459. struct Scsi_Host *shost = class_to_shost(cdev);
  3460. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  3461. struct MPT3SAS_DEVICE *sas_device_priv_data;
  3462. struct MPT3SAS_TARGET *sas_target_priv_data;
  3463. int val = 0;
  3464. struct scsi_device *sdev;
  3465. struct _raid_device *raid_device;
  3466. int qdepth;
  3467. if (kstrtoint(buf, 0, &val) != 0)
  3468. return -EINVAL;
  3469. switch (val) {
  3470. case 0:
  3471. ioc->enable_sdev_max_qd = 0;
  3472. shost_for_each_device(sdev, ioc->shost) {
  3473. sas_device_priv_data = sdev->hostdata;
  3474. if (!sas_device_priv_data)
  3475. continue;
  3476. sas_target_priv_data = sas_device_priv_data->sas_target;
  3477. if (!sas_target_priv_data)
  3478. continue;
  3479. if (sas_target_priv_data->flags &
  3480. MPT_TARGET_FLAGS_VOLUME) {
  3481. raid_device =
  3482. mpt3sas_raid_device_find_by_handle(ioc,
  3483. sas_target_priv_data->handle);
  3484. switch (raid_device->volume_type) {
  3485. case MPI2_RAID_VOL_TYPE_RAID0:
  3486. if (raid_device->device_info &
  3487. MPI2_SAS_DEVICE_INFO_SSP_TARGET)
  3488. qdepth =
  3489. MPT3SAS_SAS_QUEUE_DEPTH;
  3490. else
  3491. qdepth =
  3492. MPT3SAS_SATA_QUEUE_DEPTH;
  3493. break;
  3494. case MPI2_RAID_VOL_TYPE_RAID1E:
  3495. case MPI2_RAID_VOL_TYPE_RAID1:
  3496. case MPI2_RAID_VOL_TYPE_RAID10:
  3497. case MPI2_RAID_VOL_TYPE_UNKNOWN:
  3498. default:
  3499. qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
  3500. }
  3501. } else if (sas_target_priv_data->flags &
  3502. MPT_TARGET_FLAGS_PCIE_DEVICE)
  3503. qdepth = ioc->max_nvme_qd;
  3504. else
  3505. qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
  3506. ioc->max_wideport_qd : ioc->max_narrowport_qd;
  3507. mpt3sas_scsih_change_queue_depth(sdev, qdepth);
  3508. }
  3509. break;
  3510. case 1:
  3511. ioc->enable_sdev_max_qd = 1;
  3512. shost_for_each_device(sdev, ioc->shost)
  3513. mpt3sas_scsih_change_queue_depth(sdev,
  3514. shost->can_queue);
  3515. break;
  3516. default:
  3517. return -EINVAL;
  3518. }
  3519. return strlen(buf);
  3520. }
  3521. static DEVICE_ATTR_RW(enable_sdev_max_qd);
/* host attributes exposed under the Scsi_Host sysfs directory */
static struct attribute *mpt3sas_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_version_bios.attr,
	&dev_attr_version_mpi.attr,
	&dev_attr_version_product.attr,
	&dev_attr_version_nvdata_persistent.attr,
	&dev_attr_version_nvdata_default.attr,
	&dev_attr_board_name.attr,
	&dev_attr_board_assembly.attr,
	&dev_attr_board_tracer.attr,
	&dev_attr_io_delay.attr,
	&dev_attr_device_delay.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_fwfault_debug.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_host_sas_address.attr,
	&dev_attr_ioc_reset_count.attr,
	&dev_attr_host_trace_buffer_size.attr,
	&dev_attr_host_trace_buffer.attr,
	&dev_attr_host_trace_buffer_enable.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_diag_trigger_master.attr,
	&dev_attr_diag_trigger_event.attr,
	&dev_attr_diag_trigger_scsi.attr,
	&dev_attr_diag_trigger_mpi.attr,
	&dev_attr_drv_support_bitmap.attr,
	&dev_attr_BRM_status.attr,
	&dev_attr_enable_sdev_max_qd.attr,
	NULL,
};

static const struct attribute_group mpt3sas_host_attr_group = {
	.attrs = mpt3sas_host_attrs
};

/* NULL-terminated group list handed to the SCSI midlayer */
const struct attribute_group *mpt3sas_host_groups[] = {
	&mpt3sas_host_attr_group,
	NULL
};
  3559. /* device attributes */
  3560. /**
  3561. * sas_address_show - sas address
  3562. * @dev: pointer to embedded class device
  3563. * @attr: ?
  3564. * @buf: the buffer returned
  3565. *
  3566. * This is the sas address for the target
  3567. *
  3568. * A sysfs 'read-only' shost attribute.
  3569. */
  3570. static ssize_t
  3571. sas_address_show(struct device *dev, struct device_attribute *attr,
  3572. char *buf)
  3573. {
  3574. struct scsi_device *sdev = to_scsi_device(dev);
  3575. struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
  3576. return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
  3577. (unsigned long long)sas_device_priv_data->sas_target->sas_address);
  3578. }
  3579. static DEVICE_ATTR_RO(sas_address);
  3580. /**
  3581. * sas_device_handle_show - device handle
  3582. * @dev: pointer to embedded class device
  3583. * @attr: ?
  3584. * @buf: the buffer returned
  3585. *
  3586. * This is the firmware assigned device handle
  3587. *
  3588. * A sysfs 'read-only' shost attribute.
  3589. */
  3590. static ssize_t
  3591. sas_device_handle_show(struct device *dev, struct device_attribute *attr,
  3592. char *buf)
  3593. {
  3594. struct scsi_device *sdev = to_scsi_device(dev);
  3595. struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
  3596. return snprintf(buf, PAGE_SIZE, "0x%04x\n",
  3597. sas_device_priv_data->sas_target->handle);
  3598. }
  3599. static DEVICE_ATTR_RO(sas_device_handle);
  3600. /**
  3601. * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
  3602. * @dev: pointer to embedded device
  3603. * @attr: sas_ncq_prio_supported attribute descriptor
  3604. * @buf: the buffer returned
  3605. *
  3606. * A sysfs 'read-only' sdev attribute, only works with SATA
  3607. */
  3608. static ssize_t
  3609. sas_ncq_prio_supported_show(struct device *dev,
  3610. struct device_attribute *attr, char *buf)
  3611. {
  3612. struct scsi_device *sdev = to_scsi_device(dev);
  3613. return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
  3614. }
  3615. static DEVICE_ATTR_RO(sas_ncq_prio_supported);
  3616. /**
  3617. * sas_ncq_prio_enable_show - send prioritized io commands to device
  3618. * @dev: pointer to embedded device
  3619. * @attr: ?
  3620. * @buf: the buffer returned
  3621. *
  3622. * A sysfs 'read/write' sdev attribute, only works with SATA
  3623. */
  3624. static ssize_t
  3625. sas_ncq_prio_enable_show(struct device *dev,
  3626. struct device_attribute *attr, char *buf)
  3627. {
  3628. struct scsi_device *sdev = to_scsi_device(dev);
  3629. struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
  3630. return snprintf(buf, PAGE_SIZE, "%d\n",
  3631. sas_device_priv_data->ncq_prio_enable);
  3632. }
  3633. static ssize_t
  3634. sas_ncq_prio_enable_store(struct device *dev,
  3635. struct device_attribute *attr,
  3636. const char *buf, size_t count)
  3637. {
  3638. struct scsi_device *sdev = to_scsi_device(dev);
  3639. struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
  3640. bool ncq_prio_enable = 0;
  3641. if (kstrtobool(buf, &ncq_prio_enable))
  3642. return -EINVAL;
  3643. if (!scsih_ncq_prio_supp(sdev))
  3644. return -EINVAL;
  3645. sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
  3646. return strlen(buf);
  3647. }
  3648. static DEVICE_ATTR_RW(sas_ncq_prio_enable);
/* per-device (sdev) sysfs attributes */
static struct attribute *mpt3sas_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_sas_device_handle.attr,
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
	NULL,
};

static const struct attribute_group mpt3sas_dev_attr_group = {
	.attrs = mpt3sas_dev_attrs
};

/* NULL-terminated group list handed to the SCSI midlayer */
const struct attribute_group *mpt3sas_dev_groups[] = {
	&mpt3sas_dev_attr_group,
	NULL
};
/* file operations table for mpt3ctl device */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	/* 32-bit ioctl entry point on 64-bit kernels */
	.compat_ioctl = _ctl_ioctl_compat,
#endif
};

/* file operations table for mpt2ctl device */
static const struct file_operations ctl_gen2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_mpt2_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	/* 32-bit ioctl entry point on 64-bit kernels */
	.compat_ioctl = _ctl_mpt2_ioctl_compat,
#endif
};

/* misc character device backing the MPT3 ioctl interface */
static struct miscdevice ctl_dev = {
	.minor = MPT3SAS_MINOR,
	.name = MPT3SAS_DEV_NAME,
	.fops = &ctl_fops,
};

/* misc character device backing the gen2 (MPT2) ioctl interface */
static struct miscdevice gen2_ctl_dev = {
	.minor = MPT2SAS_MINOR,
	.name = MPT2SAS_DEV_NAME,
	.fops = &ctl_gen2_fops,
};
/**
 * mpt3sas_ctl_init - main entry point for ctl.
 * @hbas_to_enumerate: selects which ioctl misc devices get registered;
 *	1 skips the mpt3ctl device, 2 skips the mpt2ctl device
 */
void
mpt3sas_ctl_init(ushort hbas_to_enumerate)
{
	async_queue = NULL;

	/* Don't register the mpt3ctl ioctl device if
	 * hbas_to_enumerate is one.
	 */
	if (hbas_to_enumerate != 1)
		if (misc_register(&ctl_dev) < 0)
			pr_err("%s can't register misc device [minor=%d]\n",
			    MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);

	/* Don't register the mpt2ctl ioctl device if
	 * hbas_to_enumerate is two.
	 */
	if (hbas_to_enumerate != 2)
		if (misc_register(&gen2_ctl_dev) < 0)
			pr_err("%s can't register misc device [minor=%d]\n",
			    MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);

	init_waitqueue_head(&ctl_poll_wait);
}
/**
 * mpt3sas_ctl_exit - exit point for ctl
 * @hbas_to_enumerate: selects which ioctl misc devices get deregistered;
 *	mirrors the registration logic in mpt3sas_ctl_init()
 */
void
mpt3sas_ctl_exit(ushort hbas_to_enumerate)
{
	struct MPT3SAS_ADAPTER *ioc;
	int i;

	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {

		/* free memory associated to diag buffers */
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!ioc->diag_buffer[i])
				continue;
			dma_free_coherent(&ioc->pdev->dev,
			    ioc->diag_buffer_sz[i],
			    ioc->diag_buffer[i],
			    ioc->diag_buffer_dma[i]);
			/* clear the slot so a later pass won't double-free */
			ioc->diag_buffer[i] = NULL;
			ioc->diag_buffer_status[i] = 0;
		}

		kfree(ioc->event_log);
	}
	if (hbas_to_enumerate != 1)
		misc_deregister(&ctl_dev);
	if (hbas_to_enumerate != 2)
		misc_deregister(&gen2_ctl_dev);
}