// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * This driver supports the newer, SCSI-based firmware interface only.
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <[email protected]>
 *
 * Based on the original DAC960 driver, which has
 * Copyright 1998-2001 by Leonard N. Zubkoff <[email protected]>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrs.h"

static struct raid_template *myrs_raid_template;

static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};

static char *myrs_devstate_name(enum myrs_devstate state)
{
	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}

static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};

static char *myrs_raid_level_name(enum myrs_raid_level level)
{
	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
 */
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
{
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);
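
	/*
	 * Doorbell handshake (as inherited from the original DAC960
	 * driver): only while both previously submitted slots still read
	 * back as busy can we assume the controller is actively scanning
	 * the mailbox ring; if either slot is already free (words[0] ==
	 * 0), the controller may have gone idle, so kick it.
	 */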
	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);
	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;
	cs->next_cmd_mbox = next_mbox;
}

/*
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 */
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	wait_for_completion(&complete);
}
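
/*
 * Most of the direct-command helpers below follow the same pattern
 * around myrs_exec_cmd(); a minimal sketch (field names as in myrs.h):
 *
 *	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
 *	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
 *
 *	mutex_lock(&cs->dcmd_mutex);
 *	myrs_reset_cmd(cmd_blk);
 *	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
 *	mbox->common.id = MYRS_DCMD_TAG;
 *	...		(* set IOCTL opcode, DMA size and SG list *)
 *	myrs_exec_cmd(cs, cmd_blk);
 *	status = cmd_blk->status;
 *	mutex_unlock(&cs->dcmd_mutex);
 *
 * The mutex serializes use of the single direct-command block; the
 * on-stack completion is woken once the interrupt handler posts the
 * command's status.  Monitoring commands use mcmd_blk instead.
 */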

/*
 * myrs_report_progress - prints progress message
 */
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
		unsigned char *msg, unsigned long blocks,
		unsigned long size)
{
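	/*
	 * Scale both operands down by 128 (>> 7) so the "100 * blocks"
	 * product below cannot overflow the int arithmetic, at the cost
	 * of a little precision.
	 */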
	shost_printk(KERN_INFO, cs->host,
		     "Logical Drive %d: %s in Progress: %d%% completed\n",
		     ldev_num, msg,
		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
}

/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}

/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}

	return status;
}

/*
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_dev_op - executes a "Device Operation" Command
 */
static unsigned char myrs_dev_op(struct myrs_hba *cs,
		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
	mbox->dev_op.id = MYRS_DCMD_TAG;
	mbox->dev_op.control.dma_ctrl_to_host = true;
	mbox->dev_op.control.no_autosense = true;
	mbox->dev_op.ioctl_opcode = opcode;
	mbox->dev_op.opdev = opdev;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	return status;
}

/*
 * myrs_translate_pdev - translates a Physical Device Channel and
 * TargetID into a Logical Device.
 */
static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_devmap *devmap)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t devmap_addr;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	memset(devmap, 0x0, sizeof(struct myrs_devmap));
	devmap_addr = dma_map_single(&pdev->dev, devmap,
				     sizeof(struct myrs_devmap),
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, devmap_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	mbox = &cmd_blk->mbox;
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = devmap_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&pdev->dev, devmap_addr,
			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_get_event - executes a Get Event Command
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}

/*
 * myrs_get_fwstatus - executes a Get Health Status Command
 */
static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status = cmd_blk->status;

	myrs_reset_cmd(cmd_blk);
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_MCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.dma_size = sizeof(struct myrs_fwstat);
	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
	sgl = &mbox->common.dma_addr;
	sgl->sge[0].sge_addr = cs->fwstat_addr;
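	/*
	 * dma_size sits at the same offset in every mailbox format of
	 * union myrs_cmd_mbox (see myrs.h), so reading it back through
	 * ctlr_info below is equivalent to mbox->common.dma_size.
	 */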
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;

	return status;
}

/*
 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 */
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
		enable_mbox_t enable_mbox_fn)
{
	void __iomem *base = cs->io_base;
	struct pci_dev *pdev = cs->pdev;
	union myrs_cmd_mbox *cmd_mbox;
	struct myrs_stat_mbox *stat_mbox;
	union myrs_cmd_mbox *mbox;
	dma_addr_t mbox_addr;
	unsigned char status = MYRS_STATUS_FAILED;
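
	/* Prefer a 64-bit DMA mask; fall back to 32-bit before giving up. */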
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "DMA mask out of range\n");
			return false;
		}

	/* Temporary dma mapping, used only in the scope of this function */
	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
				  &mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, mbox_addr))
		return false;

	/* These are the base addresses for the command memory mailbox array */
	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
				      &cs->cmd_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map command mailbox\n");
		goto out_free;
	}
	cs->first_cmd_mbox = cmd_mbox;
	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
	cs->last_cmd_mbox = cmd_mbox;
	cs->next_cmd_mbox = cs->first_cmd_mbox;
	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
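	/*
	 * Seeding the previous-mailbox pointers with the last two ring
	 * slots means the doorbell check in myrs_qcmd() sees two free
	 * (all-zero, since the coherent allocation arrives zeroed) slots
	 * on the very first submission and kicks the controller.
	 */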

	/* These are the base addresses for the status memory mailbox array */
	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
				       &cs->stat_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map status mailbox\n");
		goto out_free;
	}
	cs->first_stat_mbox = stat_mbox;
	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
	cs->last_stat_mbox = stat_mbox;
	cs->next_stat_mbox = cs->first_stat_mbox;

	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
					    sizeof(struct myrs_fwstat),
					    &cs->fwstat_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
		dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
		cs->fwstat_buf = NULL;
		goto out_free;
	}
	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
	if (!cs->ctlr_info)
		goto out_free;

	cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
	if (!cs->event_buf)
		goto out_free;

	/* Enable the Memory Mailbox Interface. */
	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	mbox->set_mbox.id = 1;
	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
	mbox->set_mbox.control.no_autosense = true;
	mbox->set_mbox.first_cmd_mbox_size_kb =
		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
	mbox->set_mbox.first_stat_mbox_size_kb =
		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
	mbox->set_mbox.second_stat_mbox_size_kb = 0;
	mbox->set_mbox.sense_len = 0;
	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
	mbox->set_mbox.fwstat_buf_size_kb = 1;
	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
	status = enable_mbox_fn(base, mbox_addr);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
			  mbox, mbox_addr);
	if (status != MYRS_STATUS_SUCCESS)
		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
			status);
	return (status == MYRS_STATUS_SUCCESS);
}

/*
 * myrs_get_config - reads the Configuration Information
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
	model_len--;
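	/* Trim trailing blanks and NUL padding from the controller name. */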
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);

	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			     "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			     "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			     "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			     fw_version);
		return -ENODEV;
	}

	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
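	/*
	 * Example (values illustrative only): a controller reporting
	 * max_tcq == 256 yields can_queue == 253, further clamped to
	 * MYRS_MAX_CMD_MBOX - 3 if the mailbox ring is smaller.  The
	 * three reserved tags cover tags '1' and '2' named above plus,
	 * presumably, the invalid tag '0'.
	 */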
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		     "Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		     "  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		     fw_version, info->physchan_present, info->mem_size_mb);
	shost_printk(KERN_INFO, shost,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);
	shost_printk(KERN_INFO, shost,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     "  Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     "  Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);
	shost_printk(KERN_INFO, shost,
		     "  Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);
	return 0;
}

/*
 * myrs_log_event - prints a Controller Event message
 */
static struct {
	int ev_code;
	unsigned char *ev_msg;
} myrs_ev_list[] = {
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" }
};

static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
{
	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
	int ev_idx = 0, ev_code;
	unsigned char ev_type, *ev_msg;
	struct Scsi_Host *shost = cs->host;
	struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = {0};
	unsigned char sense_info[4];
	unsigned char cmd_specific[4];

	if (ev->ev_code == 0x1C) {
		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
			memset(&sshdr, 0x0, sizeof(sshdr));
			memset(sense_info, 0x0, sizeof(sense_info));
			memset(cmd_specific, 0x0, sizeof(cmd_specific));
		} else {
			memcpy(sense_info, &ev->sense_data[3], 4);
			memcpy(cmd_specific, &ev->sense_data[7], 4);
		}
	}
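	/*
	 * Vendor-specific sense data (ASC 0x80/0x81) encodes a controller
	 * event: remap it onto the event-code space used by the table
	 * above, e.g. ASC 0x80 / ASCQ 0x12 becomes event code 0x0012.
	 */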
	if (sshdr.sense_key == VENDOR_SPECIFIC &&
	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
	while (true) {
		ev_code = myrs_ev_list[ev_idx].ev_code;
		if (ev_code == ev->ev_code || ev_code == 0)
			break;
		ev_idx++;
	}
	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
	if (ev_code == 0) {
		shost_printk(KERN_WARNING, shost,
			     "Unknown Controller Event Code %04X\n",
			     ev->ev_code);
		return;
	}
	switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev && sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			switch (ev->ev_code) {
			case 0x0001:
			case 0x0007:
				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
				break;
			case 0x0002:
				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
				break;
			case 0x000C:
				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
				break;
			case 0x000E:
				pdev_info->dev_state = MYRS_DEVICE_MISSING;
				break;
			case 0x000F:
				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
				break;
			}
		}
		break;
	case 'L':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'M':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'S':
		if (sshdr.sense_key == NO_SENSE ||
		    (sshdr.sense_key == NOT_READY &&
		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
					   sshdr.ascq == 0x02)))
			break;
		shost_printk(KERN_INFO, shost,
			     "event %d: Physical Device %d:%d %s\n",
			     ev->ev_seq, ev->channel, ev->target, ev_msg);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
			     ev->channel, ev->target,
			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
			     ev->channel, ev->target,
			     sense_info[0], sense_info[1],
			     sense_info[2], sense_info[3],
			     cmd_specific[0], cmd_specific[1],
			     cmd_specific[2], cmd_specific[3]);
		break;
	case 'E':
		if (cs->disable_enc_msg)
			break;
		sprintf(msg_buf, ev_msg, ev->lun);
		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
			     ev->ev_seq, ev->target, msg_buf);
		break;
	case 'C':
		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
			     ev->ev_seq, ev_msg);
		break;
	default:
		shost_printk(KERN_INFO, shost,
			     "event %d: Unknown Event Code %04X\n",
			     ev->ev_seq, ev->ev_code);
		break;
	}
}

/*
 * SCSI sysfs interface functions
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrs_devstate_name(ldev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->dev_state);
	} else {
		struct myrs_pdev_info *pdev_info;
		const char *name;

		pdev_info = sdev->hostdata;
		name = myrs_devstate_name(pdev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->dev_state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	enum myrs_devstate new_state;
	unsigned short ldev_num;
	unsigned char status;
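
	/*
	 * Values written via sysfs carry a trailing newline, so match on
	 * the bare prefix rather than comparing the whole string.
	 */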
  905. if (!strncmp(buf, "offline", 7) ||
  906. !strncmp(buf, "kill", 4))
  907. new_state = MYRS_DEVICE_OFFLINE;
  908. else if (!strncmp(buf, "online", 6))
  909. new_state = MYRS_DEVICE_ONLINE;
  910. else if (!strncmp(buf, "standby", 7))
  911. new_state = MYRS_DEVICE_STANDBY;
  912. else
  913. return -EINVAL;
  914. if (sdev->channel < cs->ctlr_info->physchan_present) {
  915. struct myrs_pdev_info *pdev_info = sdev->hostdata;
  916. struct myrs_devmap *pdev_devmap =
  917. (struct myrs_devmap *)&pdev_info->rsvd13;
  918. if (pdev_info->dev_state == new_state) {
  919. sdev_printk(KERN_INFO, sdev,
  920. "Device already in %s\n",
  921. myrs_devstate_name(new_state));
  922. return count;
  923. }
  924. status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
  925. sdev->lun, pdev_devmap);
  926. if (status != MYRS_STATUS_SUCCESS)
  927. return -ENXIO;
  928. ldev_num = pdev_devmap->ldev_num;
  929. } else {
  930. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  931. if (ldev_info->dev_state == new_state) {
  932. sdev_printk(KERN_INFO, sdev,
  933. "Device already in %s\n",
  934. myrs_devstate_name(new_state));
  935. return count;
  936. }
  937. ldev_num = ldev_info->ldev_num;
  938. }
  939. mutex_lock(&cs->dcmd_mutex);
  940. cmd_blk = &cs->dcmd_blk;
  941. myrs_reset_cmd(cmd_blk);
  942. mbox = &cmd_blk->mbox;
  943. mbox->common.opcode = MYRS_CMD_OP_IOCTL;
  944. mbox->common.id = MYRS_DCMD_TAG;
  945. mbox->common.control.dma_ctrl_to_host = true;
  946. mbox->common.control.no_autosense = true;
  947. mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
  948. mbox->set_devstate.state = new_state;
  949. mbox->set_devstate.ldev.ldev_num = ldev_num;
  950. myrs_exec_cmd(cs, cmd_blk);
  951. status = cmd_blk->status;
  952. mutex_unlock(&cs->dcmd_mutex);
  953. if (status == MYRS_STATUS_SUCCESS) {
  954. if (sdev->channel < cs->ctlr_info->physchan_present) {
  955. struct myrs_pdev_info *pdev_info = sdev->hostdata;
  956. pdev_info->dev_state = new_state;
  957. } else {
  958. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  959. ldev_info->dev_state = new_state;
  960. }
  961. sdev_printk(KERN_INFO, sdev,
  962. "Set device state to %s\n",
  963. myrs_devstate_name(new_state));
  964. return count;
  965. }
  966. sdev_printk(KERN_INFO, sdev,
  967. "Failed to set device state to %s, status 0x%02x\n",
  968. myrs_devstate_name(new_state), status);
  969. return -EINVAL;
  970. }
  971. static DEVICE_ATTR_RW(raid_state);
  972. static ssize_t raid_level_show(struct device *dev,
  973. struct device_attribute *attr, char *buf)
  974. {
  975. struct scsi_device *sdev = to_scsi_device(dev);
  976. struct myrs_hba *cs = shost_priv(sdev->host);
  977. const char *name = NULL;
  978. if (!sdev->hostdata)
  979. return snprintf(buf, 16, "Unknown\n");
  980. if (sdev->channel >= cs->ctlr_info->physchan_present) {
  981. struct myrs_ldev_info *ldev_info;
  982. ldev_info = sdev->hostdata;
  983. name = myrs_raid_level_name(ldev_info->raid_level);
  984. if (!name)
  985. return snprintf(buf, 32, "Invalid (%02X)\n",
  986. ldev_info->dev_state);
  987. } else
  988. name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
  989. return snprintf(buf, 32, "%s\n", name);
  990. }
  991. static DEVICE_ATTR_RO(raid_level);
  992. static ssize_t rebuild_show(struct device *dev,
  993. struct device_attribute *attr, char *buf)
  994. {
  995. struct scsi_device *sdev = to_scsi_device(dev);
  996. struct myrs_hba *cs = shost_priv(sdev->host);
  997. struct myrs_ldev_info *ldev_info;
  998. unsigned short ldev_num;
  999. unsigned char status;
  1000. if (sdev->channel < cs->ctlr_info->physchan_present)
  1001. return snprintf(buf, 32, "physical device - not rebuilding\n");
  1002. ldev_info = sdev->hostdata;
  1003. ldev_num = ldev_info->ldev_num;
  1004. status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1005. if (status != MYRS_STATUS_SUCCESS) {
  1006. sdev_printk(KERN_INFO, sdev,
  1007. "Failed to get device information, status 0x%02x\n",
  1008. status);
  1009. return -EIO;
  1010. }
  1011. if (ldev_info->rbld_active) {
  1012. return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
  1013. (size_t)ldev_info->rbld_lba,
  1014. (size_t)ldev_info->cfg_devsize);
  1015. } else
  1016. return snprintf(buf, 32, "not rebuilding\n");
  1017. }
  1018. static ssize_t rebuild_store(struct device *dev,
  1019. struct device_attribute *attr, const char *buf, size_t count)
  1020. {
  1021. struct scsi_device *sdev = to_scsi_device(dev);
  1022. struct myrs_hba *cs = shost_priv(sdev->host);
  1023. struct myrs_ldev_info *ldev_info;
  1024. struct myrs_cmdblk *cmd_blk;
  1025. union myrs_cmd_mbox *mbox;
  1026. unsigned short ldev_num;
  1027. unsigned char status;
  1028. int rebuild, ret;
  1029. if (sdev->channel < cs->ctlr_info->physchan_present)
  1030. return -EINVAL;
  1031. ldev_info = sdev->hostdata;
  1032. if (!ldev_info)
  1033. return -ENXIO;
  1034. ldev_num = ldev_info->ldev_num;
  1035. ret = kstrtoint(buf, 0, &rebuild);
  1036. if (ret)
  1037. return ret;
  1038. status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1039. if (status != MYRS_STATUS_SUCCESS) {
  1040. sdev_printk(KERN_INFO, sdev,
  1041. "Failed to get device information, status 0x%02x\n",
  1042. status);
  1043. return -EIO;
  1044. }
  1045. if (rebuild && ldev_info->rbld_active) {
  1046. sdev_printk(KERN_INFO, sdev,
  1047. "Rebuild Not Initiated; already in progress\n");
  1048. return -EALREADY;
  1049. }
  1050. if (!rebuild && !ldev_info->rbld_active) {
  1051. sdev_printk(KERN_INFO, sdev,
  1052. "Rebuild Not Cancelled; no rebuild in progress\n");
  1053. return count;
  1054. }
  1055. mutex_lock(&cs->dcmd_mutex);
  1056. cmd_blk = &cs->dcmd_blk;
  1057. myrs_reset_cmd(cmd_blk);
  1058. mbox = &cmd_blk->mbox;
  1059. mbox->common.opcode = MYRS_CMD_OP_IOCTL;
  1060. mbox->common.id = MYRS_DCMD_TAG;
  1061. mbox->common.control.dma_ctrl_to_host = true;
  1062. mbox->common.control.no_autosense = true;
  1063. if (rebuild) {
  1064. mbox->ldev_info.ldev.ldev_num = ldev_num;
  1065. mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
  1066. } else {
  1067. mbox->ldev_info.ldev.ldev_num = ldev_num;
  1068. mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
  1069. }
  1070. myrs_exec_cmd(cs, cmd_blk);
  1071. status = cmd_blk->status;
  1072. mutex_unlock(&cs->dcmd_mutex);
  1073. if (status) {
  1074. sdev_printk(KERN_INFO, sdev,
  1075. "Rebuild Not %s, status 0x%02x\n",
  1076. rebuild ? "Initiated" : "Cancelled", status);
  1077. ret = -EIO;
  1078. } else {
  1079. sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
  1080. rebuild ? "Initiated" : "Cancelled");
  1081. ret = count;
  1082. }
  1083. return ret;
  1084. }
  1085. static DEVICE_ATTR_RW(rebuild);
  1086. static ssize_t consistency_check_show(struct device *dev,
  1087. struct device_attribute *attr, char *buf)
  1088. {
  1089. struct scsi_device *sdev = to_scsi_device(dev);
  1090. struct myrs_hba *cs = shost_priv(sdev->host);
  1091. struct myrs_ldev_info *ldev_info;
  1092. unsigned short ldev_num;
  1093. if (sdev->channel < cs->ctlr_info->physchan_present)
  1094. return snprintf(buf, 32, "physical device - not checking\n");
  1095. ldev_info = sdev->hostdata;
  1096. if (!ldev_info)
  1097. return -ENXIO;
  1098. ldev_num = ldev_info->ldev_num;
  1099. myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1100. if (ldev_info->cc_active)
  1101. return snprintf(buf, 32, "checking block %zu of %zu\n",
  1102. (size_t)ldev_info->cc_lba,
  1103. (size_t)ldev_info->cfg_devsize);
  1104. else
  1105. return snprintf(buf, 32, "not checking\n");
  1106. }
  1107. static ssize_t consistency_check_store(struct device *dev,
  1108. struct device_attribute *attr, const char *buf, size_t count)
  1109. {
  1110. struct scsi_device *sdev = to_scsi_device(dev);
  1111. struct myrs_hba *cs = shost_priv(sdev->host);
  1112. struct myrs_ldev_info *ldev_info;
  1113. struct myrs_cmdblk *cmd_blk;
  1114. union myrs_cmd_mbox *mbox;
  1115. unsigned short ldev_num;
  1116. unsigned char status;
  1117. int check, ret;
  1118. if (sdev->channel < cs->ctlr_info->physchan_present)
  1119. return -EINVAL;
  1120. ldev_info = sdev->hostdata;
  1121. if (!ldev_info)
  1122. return -ENXIO;
  1123. ldev_num = ldev_info->ldev_num;
  1124. ret = kstrtoint(buf, 0, &check);
  1125. if (ret)
  1126. return ret;
  1127. status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1128. if (status != MYRS_STATUS_SUCCESS) {
  1129. sdev_printk(KERN_INFO, sdev,
  1130. "Failed to get device information, status 0x%02x\n",
  1131. status);
  1132. return -EIO;
  1133. }
  1134. if (check && ldev_info->cc_active) {
  1135. sdev_printk(KERN_INFO, sdev,
  1136. "Consistency Check Not Initiated; "
  1137. "already in progress\n");
  1138. return -EALREADY;
  1139. }
  1140. if (!check && !ldev_info->cc_active) {
  1141. sdev_printk(KERN_INFO, sdev,
  1142. "Consistency Check Not Cancelled; "
  1143. "check not in progress\n");
  1144. return count;
  1145. }
  1146. mutex_lock(&cs->dcmd_mutex);
  1147. cmd_blk = &cs->dcmd_blk;
  1148. myrs_reset_cmd(cmd_blk);
  1149. mbox = &cmd_blk->mbox;
  1150. mbox->common.opcode = MYRS_CMD_OP_IOCTL;
  1151. mbox->common.id = MYRS_DCMD_TAG;
  1152. mbox->common.control.dma_ctrl_to_host = true;
  1153. mbox->common.control.no_autosense = true;
  1154. if (check) {
  1155. mbox->cc.ldev.ldev_num = ldev_num;
  1156. mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
  1157. mbox->cc.restore_consistency = true;
  1158. mbox->cc.initialized_area_only = false;
  1159. } else {
  1160. mbox->cc.ldev.ldev_num = ldev_num;
  1161. mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
  1162. }
  1163. myrs_exec_cmd(cs, cmd_blk);
  1164. status = cmd_blk->status;
  1165. mutex_unlock(&cs->dcmd_mutex);
  1166. if (status != MYRS_STATUS_SUCCESS) {
  1167. sdev_printk(KERN_INFO, sdev,
  1168. "Consistency Check Not %s, status 0x%02x\n",
  1169. check ? "Initiated" : "Cancelled", status);
  1170. ret = -EIO;
  1171. } else {
  1172. sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
  1173. check ? "Initiated" : "Cancelled");
  1174. ret = count;
  1175. }
  1176. return ret;
  1177. }
  1178. static DEVICE_ATTR_RW(consistency_check);
  1179. static struct attribute *myrs_sdev_attrs[] = {
  1180. &dev_attr_consistency_check.attr,
  1181. &dev_attr_rebuild.attr,
  1182. &dev_attr_raid_state.attr,
  1183. &dev_attr_raid_level.attr,
  1184. NULL,
  1185. };
  1186. ATTRIBUTE_GROUPS(myrs_sdev);
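/*
 * Usage sketch (paths illustrative): the per-device attributes above
 * appear under the scsi_device sysfs directory, e.g.
 *
 *	# echo 1 > /sys/bus/scsi/devices/0:2:0:0/rebuild
 *	# cat /sys/bus/scsi/devices/0:2:0:0/raid_state
 */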
  1187. static ssize_t serial_show(struct device *dev,
  1188. struct device_attribute *attr, char *buf)
  1189. {
  1190. struct Scsi_Host *shost = class_to_shost(dev);
  1191. struct myrs_hba *cs = shost_priv(shost);
  1192. char serial[17];
  1193. memcpy(serial, cs->ctlr_info->serial_number, 16);
  1194. serial[16] = '\0';
1195. return snprintf(buf, sizeof(serial) + 1, "%s\n", serial);
  1196. }
  1197. static DEVICE_ATTR_RO(serial);
  1198. static ssize_t ctlr_num_show(struct device *dev,
  1199. struct device_attribute *attr, char *buf)
  1200. {
  1201. struct Scsi_Host *shost = class_to_shost(dev);
  1202. struct myrs_hba *cs = shost_priv(shost);
  1203. return snprintf(buf, 20, "%d\n", cs->host->host_no);
  1204. }
  1205. static DEVICE_ATTR_RO(ctlr_num);
  1206. static struct myrs_cpu_type_tbl {
  1207. enum myrs_cpu_type type;
  1208. char *name;
  1209. } myrs_cpu_type_names[] = {
  1210. { MYRS_CPUTYPE_i960CA, "i960CA" },
  1211. { MYRS_CPUTYPE_i960RD, "i960RD" },
  1212. { MYRS_CPUTYPE_i960RN, "i960RN" },
  1213. { MYRS_CPUTYPE_i960RP, "i960RP" },
  1214. { MYRS_CPUTYPE_NorthBay, "NorthBay" },
  1215. { MYRS_CPUTYPE_StrongArm, "StrongARM" },
  1216. { MYRS_CPUTYPE_i960RM, "i960RM" },
  1217. };
  1218. static ssize_t processor_show(struct device *dev,
  1219. struct device_attribute *attr, char *buf)
  1220. {
  1221. struct Scsi_Host *shost = class_to_shost(dev);
  1222. struct myrs_hba *cs = shost_priv(shost);
  1223. struct myrs_cpu_type_tbl *tbl;
  1224. const char *first_processor = NULL;
  1225. const char *second_processor = NULL;
  1226. struct myrs_ctlr_info *info = cs->ctlr_info;
  1227. ssize_t ret;
  1228. int i;
  1229. if (info->cpu[0].cpu_count) {
  1230. tbl = myrs_cpu_type_names;
  1231. for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
  1232. if (tbl[i].type == info->cpu[0].cpu_type) {
  1233. first_processor = tbl[i].name;
  1234. break;
  1235. }
  1236. }
  1237. }
  1238. if (info->cpu[1].cpu_count) {
  1239. tbl = myrs_cpu_type_names;
  1240. for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
  1241. if (tbl[i].type == info->cpu[1].cpu_type) {
  1242. second_processor = tbl[i].name;
  1243. break;
  1244. }
  1245. }
  1246. }
  1247. if (first_processor && second_processor)
  1248. ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
  1249. "2: %s (%s, %d cpus)\n",
  1250. info->cpu[0].cpu_name,
  1251. first_processor, info->cpu[0].cpu_count,
  1252. info->cpu[1].cpu_name,
  1253. second_processor, info->cpu[1].cpu_count);
  1254. else if (first_processor && !second_processor)
  1255. ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
  1256. info->cpu[0].cpu_name,
  1257. first_processor, info->cpu[0].cpu_count);
  1258. else if (!first_processor && second_processor)
  1259. ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
  1260. info->cpu[1].cpu_name,
  1261. second_processor, info->cpu[1].cpu_count);
  1262. else
  1263. ret = snprintf(buf, 64, "1: absent\n2: absent\n");
  1264. return ret;
  1265. }
  1266. static DEVICE_ATTR_RO(processor);
  1267. static ssize_t model_show(struct device *dev,
  1268. struct device_attribute *attr, char *buf)
  1269. {
  1270. struct Scsi_Host *shost = class_to_shost(dev);
  1271. struct myrs_hba *cs = shost_priv(shost);
  1272. return snprintf(buf, 28, "%s\n", cs->model_name);
  1273. }
  1274. static DEVICE_ATTR_RO(model);
  1275. static ssize_t ctlr_type_show(struct device *dev,
  1276. struct device_attribute *attr, char *buf)
  1277. {
  1278. struct Scsi_Host *shost = class_to_shost(dev);
  1279. struct myrs_hba *cs = shost_priv(shost);
  1280. return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
  1281. }
  1282. static DEVICE_ATTR_RO(ctlr_type);
  1283. static ssize_t cache_size_show(struct device *dev,
  1284. struct device_attribute *attr, char *buf)
  1285. {
  1286. struct Scsi_Host *shost = class_to_shost(dev);
  1287. struct myrs_hba *cs = shost_priv(shost);
  1288. return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
  1289. }
  1290. static DEVICE_ATTR_RO(cache_size);
  1291. static ssize_t firmware_show(struct device *dev,
  1292. struct device_attribute *attr, char *buf)
  1293. {
  1294. struct Scsi_Host *shost = class_to_shost(dev);
  1295. struct myrs_hba *cs = shost_priv(shost);
  1296. return snprintf(buf, 16, "%d.%02d-%02d\n",
  1297. cs->ctlr_info->fw_major_version,
  1298. cs->ctlr_info->fw_minor_version,
  1299. cs->ctlr_info->fw_turn_number);
  1300. }
  1301. static DEVICE_ATTR_RO(firmware);
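/*
 * Writing anything triggers a firmware discovery scan, then kicks and
 * flushes the monitor work so the resulting events are processed
 * before the write returns.
 */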
  1302. static ssize_t discovery_store(struct device *dev,
  1303. struct device_attribute *attr, const char *buf, size_t count)
  1304. {
  1305. struct Scsi_Host *shost = class_to_shost(dev);
  1306. struct myrs_hba *cs = shost_priv(shost);
  1307. struct myrs_cmdblk *cmd_blk;
  1308. union myrs_cmd_mbox *mbox;
  1309. unsigned char status;
  1310. mutex_lock(&cs->dcmd_mutex);
  1311. cmd_blk = &cs->dcmd_blk;
  1312. myrs_reset_cmd(cmd_blk);
  1313. mbox = &cmd_blk->mbox;
  1314. mbox->common.opcode = MYRS_CMD_OP_IOCTL;
  1315. mbox->common.id = MYRS_DCMD_TAG;
  1316. mbox->common.control.dma_ctrl_to_host = true;
  1317. mbox->common.control.no_autosense = true;
  1318. mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
  1319. myrs_exec_cmd(cs, cmd_blk);
  1320. status = cmd_blk->status;
  1321. mutex_unlock(&cs->dcmd_mutex);
  1322. if (status != MYRS_STATUS_SUCCESS) {
  1323. shost_printk(KERN_INFO, shost,
  1324. "Discovery Not Initiated, status %02X\n",
  1325. status);
  1326. return -EINVAL;
  1327. }
  1328. shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
  1329. cs->next_evseq = 0;
  1330. cs->needs_update = true;
  1331. queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
  1332. flush_delayed_work(&cs->monitor_work);
  1333. shost_printk(KERN_INFO, shost, "Discovery Completed\n");
  1334. return count;
  1335. }
  1336. static DEVICE_ATTR_WO(discovery);
  1337. static ssize_t flush_cache_store(struct device *dev,
  1338. struct device_attribute *attr, const char *buf, size_t count)
  1339. {
  1340. struct Scsi_Host *shost = class_to_shost(dev);
  1341. struct myrs_hba *cs = shost_priv(shost);
  1342. unsigned char status;
  1343. status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
  1344. MYRS_RAID_CONTROLLER);
  1345. if (status == MYRS_STATUS_SUCCESS) {
  1346. shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
  1347. return count;
  1348. }
  1349. shost_printk(KERN_INFO, shost,
  1350. "Cache Flush failed, status 0x%02x\n", status);
  1351. return -EIO;
  1352. }
  1353. static DEVICE_ATTR_WO(flush_cache);
  1354. static ssize_t disable_enclosure_messages_show(struct device *dev,
  1355. struct device_attribute *attr, char *buf)
  1356. {
  1357. struct Scsi_Host *shost = class_to_shost(dev);
  1358. struct myrs_hba *cs = shost_priv(shost);
  1359. return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
  1360. }
  1361. static ssize_t disable_enclosure_messages_store(struct device *dev,
  1362. struct device_attribute *attr, const char *buf, size_t count)
  1363. {
1364. struct Scsi_Host *shost = class_to_shost(dev);
1365. struct myrs_hba *cs = shost_priv(shost);
  1366. int value, ret;
  1367. ret = kstrtoint(buf, 0, &value);
  1368. if (ret)
  1369. return ret;
1370. if (value < 0 || value > 2)
  1371. return -EINVAL;
  1372. cs->disable_enc_msg = value;
  1373. return count;
  1374. }
  1375. static DEVICE_ATTR_RW(disable_enclosure_messages);
  1376. static struct attribute *myrs_shost_attrs[] = {
  1377. &dev_attr_serial.attr,
  1378. &dev_attr_ctlr_num.attr,
  1379. &dev_attr_processor.attr,
  1380. &dev_attr_model.attr,
  1381. &dev_attr_ctlr_type.attr,
  1382. &dev_attr_cache_size.attr,
  1383. &dev_attr_firmware.attr,
  1384. &dev_attr_discovery.attr,
  1385. &dev_attr_flush_cache.attr,
  1386. &dev_attr_disable_enclosure_messages.attr,
  1387. NULL,
  1388. };
  1389. ATTRIBUTE_GROUPS(myrs_shost);
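/*
 * Usage sketch (paths illustrative): the host attributes above appear
 * under /sys/class/scsi_host/hostN/, e.g.
 *
 *	# cat /sys/class/scsi_host/host0/firmware
 *	# echo 1 > /sys/class/scsi_host/host0/flush_cache
 */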
  1390. /*
  1391. * SCSI midlayer interface
  1392. */
  1393. static int myrs_host_reset(struct scsi_cmnd *scmd)
  1394. {
  1395. struct Scsi_Host *shost = scmd->device->host;
  1396. struct myrs_hba *cs = shost_priv(shost);
  1397. cs->reset(cs->io_base);
  1398. return SUCCESS;
  1399. }
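/*
 * Emulate MODE SENSE for logical volumes: build a caching mode page
 * (0x08) from the volume's cache settings, prepend a block descriptor
 * unless DBD is set, and copy the result into the data buffer.
 */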
  1400. static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
  1401. struct myrs_ldev_info *ldev_info)
  1402. {
  1403. unsigned char modes[32], *mode_pg;
  1404. bool dbd;
  1405. size_t mode_len;
  1406. dbd = (scmd->cmnd[1] & 0x08) == 0x08;
  1407. if (dbd) {
  1408. mode_len = 24;
  1409. mode_pg = &modes[4];
  1410. } else {
  1411. mode_len = 32;
  1412. mode_pg = &modes[12];
  1413. }
  1414. memset(modes, 0, sizeof(modes));
  1415. modes[0] = mode_len - 1;
  1416. modes[2] = 0x10; /* Enable FUA */
  1417. if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
  1418. modes[2] |= 0x80;
  1419. if (!dbd) {
  1420. unsigned char *block_desc = &modes[4];
  1421. modes[3] = 8;
  1422. put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
  1423. put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
  1424. }
  1425. mode_pg[0] = 0x08;
  1426. mode_pg[1] = 0x12;
  1427. if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
  1428. mode_pg[2] |= 0x01;
  1429. if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
  1430. ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
  1431. mode_pg[2] |= 0x04;
  1432. if (ldev_info->cacheline_size) {
  1433. mode_pg[2] |= 0x08;
  1434. put_unaligned_be16(1 << ldev_info->cacheline_size,
  1435. &mode_pg[14]);
  1436. }
  1437. scsi_sg_copy_from_buffer(scmd, modes, mode_len);
  1438. }
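/*
 * Build and queue a command: REPORT LUNS is rejected outright and
 * MODE SENSE for logical volumes is emulated; everything else becomes
 * a SCSI_10 or SCSI_255 mailbox (by CDB length) with sense, DCDB and
 * scatter/gather buffers taken from the per-controller DMA pools.
 */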
  1439. static int myrs_queuecommand(struct Scsi_Host *shost,
  1440. struct scsi_cmnd *scmd)
  1441. {
  1442. struct request *rq = scsi_cmd_to_rq(scmd);
  1443. struct myrs_hba *cs = shost_priv(shost);
  1444. struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
  1445. union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
  1446. struct scsi_device *sdev = scmd->device;
  1447. union myrs_sgl *hw_sge;
  1448. dma_addr_t sense_addr;
  1449. struct scatterlist *sgl;
  1450. unsigned long flags, timeout;
  1451. int nsge;
  1452. if (!scmd->device->hostdata) {
  1453. scmd->result = (DID_NO_CONNECT << 16);
  1454. scsi_done(scmd);
  1455. return 0;
  1456. }
  1457. switch (scmd->cmnd[0]) {
  1458. case REPORT_LUNS:
  1459. scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
  1460. scsi_done(scmd);
  1461. return 0;
  1462. case MODE_SENSE:
  1463. if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
  1464. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  1465. if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
  1466. (scmd->cmnd[2] & 0x3F) != 0x08) {
  1467. /* Illegal request, invalid field in CDB */
  1468. scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
  1469. } else {
  1470. myrs_mode_sense(cs, scmd, ldev_info);
  1471. scmd->result = (DID_OK << 16);
  1472. }
  1473. scsi_done(scmd);
  1474. return 0;
  1475. }
  1476. break;
  1477. }
  1478. myrs_reset_cmd(cmd_blk);
  1479. cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
  1480. &sense_addr);
  1481. if (!cmd_blk->sense)
  1482. return SCSI_MLQUEUE_HOST_BUSY;
  1483. cmd_blk->sense_addr = sense_addr;
  1484. timeout = rq->timeout;
  1485. if (scmd->cmd_len <= 10) {
  1486. if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
  1487. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  1488. mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
  1489. mbox->SCSI_10.pdev.lun = ldev_info->lun;
  1490. mbox->SCSI_10.pdev.target = ldev_info->target;
  1491. mbox->SCSI_10.pdev.channel = ldev_info->channel;
  1492. mbox->SCSI_10.pdev.ctlr = 0;
  1493. } else {
  1494. mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
  1495. mbox->SCSI_10.pdev.lun = sdev->lun;
  1496. mbox->SCSI_10.pdev.target = sdev->id;
  1497. mbox->SCSI_10.pdev.channel = sdev->channel;
  1498. }
  1499. mbox->SCSI_10.id = rq->tag + 3;
  1500. mbox->SCSI_10.control.dma_ctrl_to_host =
  1501. (scmd->sc_data_direction == DMA_FROM_DEVICE);
  1502. if (rq->cmd_flags & REQ_FUA)
  1503. mbox->SCSI_10.control.fua = true;
  1504. mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
  1505. mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
  1506. mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
  1507. mbox->SCSI_10.cdb_len = scmd->cmd_len;
  1508. if (timeout > 60) {
  1509. mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
  1510. mbox->SCSI_10.tmo.tmo_val = timeout / 60;
  1511. } else {
  1512. mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
  1513. mbox->SCSI_10.tmo.tmo_val = timeout;
  1514. }
  1515. memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
  1516. hw_sge = &mbox->SCSI_10.dma_addr;
  1517. cmd_blk->dcdb = NULL;
  1518. } else {
  1519. dma_addr_t dcdb_dma;
  1520. cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
  1521. &dcdb_dma);
  1522. if (!cmd_blk->dcdb) {
  1523. dma_pool_free(cs->sense_pool, cmd_blk->sense,
  1524. cmd_blk->sense_addr);
  1525. cmd_blk->sense = NULL;
  1526. cmd_blk->sense_addr = 0;
  1527. return SCSI_MLQUEUE_HOST_BUSY;
  1528. }
  1529. cmd_blk->dcdb_dma = dcdb_dma;
  1530. if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
  1531. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  1532. mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
  1533. mbox->SCSI_255.pdev.lun = ldev_info->lun;
  1534. mbox->SCSI_255.pdev.target = ldev_info->target;
  1535. mbox->SCSI_255.pdev.channel = ldev_info->channel;
  1536. mbox->SCSI_255.pdev.ctlr = 0;
  1537. } else {
  1538. mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
  1539. mbox->SCSI_255.pdev.lun = sdev->lun;
  1540. mbox->SCSI_255.pdev.target = sdev->id;
  1541. mbox->SCSI_255.pdev.channel = sdev->channel;
  1542. }
  1543. mbox->SCSI_255.id = rq->tag + 3;
  1544. mbox->SCSI_255.control.dma_ctrl_to_host =
  1545. (scmd->sc_data_direction == DMA_FROM_DEVICE);
  1546. if (rq->cmd_flags & REQ_FUA)
  1547. mbox->SCSI_255.control.fua = true;
  1548. mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
  1549. mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
  1550. mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
  1551. mbox->SCSI_255.cdb_len = scmd->cmd_len;
  1552. mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
  1553. if (timeout > 60) {
  1554. mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
  1555. mbox->SCSI_255.tmo.tmo_val = timeout / 60;
  1556. } else {
  1557. mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
  1558. mbox->SCSI_255.tmo.tmo_val = timeout;
  1559. }
  1560. memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
  1561. hw_sge = &mbox->SCSI_255.dma_addr;
  1562. }
  1563. if (scmd->sc_data_direction == DMA_NONE)
  1564. goto submit;
  1565. nsge = scsi_dma_map(scmd);
  1566. if (nsge == 1) {
  1567. sgl = scsi_sglist(scmd);
  1568. hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
  1569. hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
  1570. } else {
  1571. struct myrs_sge *hw_sgl;
  1572. dma_addr_t hw_sgl_addr;
  1573. int i;
  1574. if (nsge > 2) {
  1575. hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
  1576. &hw_sgl_addr);
  1577. if (WARN_ON(!hw_sgl)) {
  1578. if (cmd_blk->dcdb) {
  1579. dma_pool_free(cs->dcdb_pool,
  1580. cmd_blk->dcdb,
  1581. cmd_blk->dcdb_dma);
  1582. cmd_blk->dcdb = NULL;
  1583. cmd_blk->dcdb_dma = 0;
  1584. }
  1585. dma_pool_free(cs->sense_pool,
  1586. cmd_blk->sense,
  1587. cmd_blk->sense_addr);
  1588. cmd_blk->sense = NULL;
  1589. cmd_blk->sense_addr = 0;
  1590. return SCSI_MLQUEUE_HOST_BUSY;
  1591. }
  1592. cmd_blk->sgl = hw_sgl;
  1593. cmd_blk->sgl_addr = hw_sgl_addr;
  1594. if (scmd->cmd_len <= 10)
  1595. mbox->SCSI_10.control.add_sge_mem = true;
  1596. else
  1597. mbox->SCSI_255.control.add_sge_mem = true;
  1598. hw_sge->ext.sge0_len = nsge;
  1599. hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
  1600. } else
  1601. hw_sgl = hw_sge->sge;
  1602. scsi_for_each_sg(scmd, sgl, nsge, i) {
  1603. if (WARN_ON(!hw_sgl)) {
  1604. scsi_dma_unmap(scmd);
  1605. scmd->result = (DID_ERROR << 16);
  1606. scsi_done(scmd);
  1607. return 0;
  1608. }
  1609. hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
  1610. hw_sgl->sge_count = (u64)sg_dma_len(sgl);
  1611. hw_sgl++;
  1612. }
  1613. }
  1614. submit:
  1615. spin_lock_irqsave(&cs->queue_lock, flags);
  1616. myrs_qcmd(cs, cmd_blk);
  1617. spin_unlock_irqrestore(&cs->queue_lock, flags);
  1618. return 0;
  1619. }
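/*
 * Map a logical-volume scsi_device (on a channel beyond the physical
 * ones) to the firmware's flat logical device number.
 */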
  1620. static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
  1621. struct scsi_device *sdev)
  1622. {
  1623. unsigned short ldev_num;
  1624. unsigned int chan_offset =
  1625. sdev->channel - cs->ctlr_info->physchan_present;
  1626. ldev_num = sdev->id + chan_offset * sdev->host->max_id;
  1627. return ldev_num;
  1628. }
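/*
 * Allocate per-device data: logical volumes get a myrs_ldev_info and
 * have their RAID level published via the raid_class template;
 * physical devices get a myrs_pdev_info.
 */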
  1629. static int myrs_slave_alloc(struct scsi_device *sdev)
  1630. {
  1631. struct myrs_hba *cs = shost_priv(sdev->host);
  1632. unsigned char status;
  1633. if (sdev->channel > sdev->host->max_channel)
  1634. return 0;
  1635. if (sdev->channel >= cs->ctlr_info->physchan_present) {
  1636. struct myrs_ldev_info *ldev_info;
  1637. unsigned short ldev_num;
  1638. if (sdev->lun > 0)
  1639. return -ENXIO;
  1640. ldev_num = myrs_translate_ldev(cs, sdev);
  1641. ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
  1642. if (!ldev_info)
  1643. return -ENOMEM;
  1644. status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1645. if (status != MYRS_STATUS_SUCCESS) {
  1646. sdev->hostdata = NULL;
  1647. kfree(ldev_info);
  1648. } else {
  1649. enum raid_level level;
  1650. dev_dbg(&sdev->sdev_gendev,
  1651. "Logical device mapping %d:%d:%d -> %d\n",
  1652. ldev_info->channel, ldev_info->target,
  1653. ldev_info->lun, ldev_info->ldev_num);
  1654. sdev->hostdata = ldev_info;
  1655. switch (ldev_info->raid_level) {
  1656. case MYRS_RAID_LEVEL0:
  1657. level = RAID_LEVEL_LINEAR;
  1658. break;
  1659. case MYRS_RAID_LEVEL1:
  1660. level = RAID_LEVEL_1;
  1661. break;
  1662. case MYRS_RAID_LEVEL3:
  1663. case MYRS_RAID_LEVEL3F:
  1664. case MYRS_RAID_LEVEL3L:
  1665. level = RAID_LEVEL_3;
  1666. break;
  1667. case MYRS_RAID_LEVEL5:
  1668. case MYRS_RAID_LEVEL5L:
  1669. level = RAID_LEVEL_5;
  1670. break;
  1671. case MYRS_RAID_LEVEL6:
  1672. level = RAID_LEVEL_6;
  1673. break;
  1674. case MYRS_RAID_LEVELE:
  1675. case MYRS_RAID_NEWSPAN:
  1676. case MYRS_RAID_SPAN:
  1677. level = RAID_LEVEL_LINEAR;
  1678. break;
  1679. case MYRS_RAID_JBOD:
  1680. level = RAID_LEVEL_JBOD;
  1681. break;
  1682. default:
  1683. level = RAID_LEVEL_UNKNOWN;
  1684. break;
  1685. }
  1686. raid_set_level(myrs_raid_template,
  1687. &sdev->sdev_gendev, level);
  1688. if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
  1689. const char *name;
  1690. name = myrs_devstate_name(ldev_info->dev_state);
  1691. sdev_printk(KERN_DEBUG, sdev,
  1692. "logical device in state %s\n",
  1693. name ? name : "Invalid");
  1694. }
  1695. }
  1696. } else {
  1697. struct myrs_pdev_info *pdev_info;
  1698. pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
  1699. if (!pdev_info)
  1700. return -ENOMEM;
  1701. status = myrs_get_pdev_info(cs, sdev->channel,
  1702. sdev->id, sdev->lun,
  1703. pdev_info);
  1704. if (status != MYRS_STATUS_SUCCESS) {
  1705. sdev->hostdata = NULL;
  1706. kfree(pdev_info);
  1707. return -ENXIO;
  1708. }
  1709. sdev->hostdata = pdev_info;
  1710. }
  1711. return 0;
  1712. }
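/*
 * Physical devices behind the RAID controller are hidden from the
 * upper-level drivers; logical volumes advertise their write-cache
 * setting so the sd defaults match the controller configuration.
 */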
  1713. static int myrs_slave_configure(struct scsi_device *sdev)
  1714. {
  1715. struct myrs_hba *cs = shost_priv(sdev->host);
  1716. struct myrs_ldev_info *ldev_info;
  1717. if (sdev->channel > sdev->host->max_channel)
  1718. return -ENXIO;
  1719. if (sdev->channel < cs->ctlr_info->physchan_present) {
  1720. /* Skip HBA device */
  1721. if (sdev->type == TYPE_RAID)
  1722. return -ENXIO;
  1723. sdev->no_uld_attach = 1;
  1724. return 0;
  1725. }
  1726. if (sdev->lun != 0)
  1727. return -ENXIO;
  1728. ldev_info = sdev->hostdata;
  1729. if (!ldev_info)
  1730. return -ENXIO;
  1731. if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
  1732. ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
  1733. sdev->wce_default_on = 1;
  1734. sdev->tagged_supported = 1;
  1735. return 0;
  1736. }
  1737. static void myrs_slave_destroy(struct scsi_device *sdev)
  1738. {
  1739. kfree(sdev->hostdata);
  1740. }
  1741. static struct scsi_host_template myrs_template = {
  1742. .module = THIS_MODULE,
  1743. .name = "DAC960",
  1744. .proc_name = "myrs",
  1745. .queuecommand = myrs_queuecommand,
  1746. .eh_host_reset_handler = myrs_host_reset,
  1747. .slave_alloc = myrs_slave_alloc,
  1748. .slave_configure = myrs_slave_configure,
  1749. .slave_destroy = myrs_slave_destroy,
  1750. .cmd_size = sizeof(struct myrs_cmdblk),
  1751. .shost_groups = myrs_shost_groups,
  1752. .sdev_groups = myrs_sdev_groups,
  1753. .this_id = -1,
  1754. };
  1755. static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
  1756. const struct pci_device_id *entry)
  1757. {
  1758. struct Scsi_Host *shost;
  1759. struct myrs_hba *cs;
  1760. shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
  1761. if (!shost)
  1762. return NULL;
  1763. shost->max_cmd_len = 16;
  1764. shost->max_lun = 256;
  1765. cs = shost_priv(shost);
  1766. mutex_init(&cs->dcmd_mutex);
  1767. mutex_init(&cs->cinfo_mutex);
  1768. cs->host = shost;
  1769. return cs;
  1770. }
  1771. /*
  1772. * RAID template functions
  1773. */
  1774. /**
  1775. * myrs_is_raid - return boolean indicating device is raid volume
  1776. * @dev: the device struct object
  1777. */
  1778. static int
  1779. myrs_is_raid(struct device *dev)
  1780. {
  1781. struct scsi_device *sdev = to_scsi_device(dev);
  1782. struct myrs_hba *cs = shost_priv(sdev->host);
  1783. return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
  1784. }
  1785. /**
  1786. * myrs_get_resync - get raid volume resync percent complete
  1787. * @dev: the device struct object
  1788. */
  1789. static void
  1790. myrs_get_resync(struct device *dev)
  1791. {
  1792. struct scsi_device *sdev = to_scsi_device(dev);
  1793. struct myrs_hba *cs = shost_priv(sdev->host);
  1794. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  1795. u64 percent_complete = 0;
  1796. if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
  1797. return;
  1798. if (ldev_info->rbld_active) {
  1799. unsigned short ldev_num = ldev_info->ldev_num;
  1800. myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1801. percent_complete = ldev_info->rbld_lba * 100;
  1802. do_div(percent_complete, ldev_info->cfg_devsize);
  1803. }
  1804. raid_set_resync(myrs_raid_template, dev, percent_complete);
  1805. }
  1806. /**
  1807. * myrs_get_state - get raid volume status
  1808. * @dev: the device struct object
  1809. */
  1810. static void
  1811. myrs_get_state(struct device *dev)
  1812. {
  1813. struct scsi_device *sdev = to_scsi_device(dev);
  1814. struct myrs_hba *cs = shost_priv(sdev->host);
  1815. struct myrs_ldev_info *ldev_info = sdev->hostdata;
  1816. enum raid_state state = RAID_STATE_UNKNOWN;
  1817. if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
  1818. state = RAID_STATE_UNKNOWN;
  1819. else {
  1820. switch (ldev_info->dev_state) {
  1821. case MYRS_DEVICE_ONLINE:
  1822. state = RAID_STATE_ACTIVE;
  1823. break;
  1824. case MYRS_DEVICE_SUSPECTED_CRITICAL:
  1825. case MYRS_DEVICE_CRITICAL:
  1826. state = RAID_STATE_DEGRADED;
  1827. break;
  1828. case MYRS_DEVICE_REBUILD:
  1829. state = RAID_STATE_RESYNCING;
  1830. break;
  1831. case MYRS_DEVICE_UNCONFIGURED:
  1832. case MYRS_DEVICE_INVALID_STATE:
  1833. state = RAID_STATE_UNKNOWN;
  1834. break;
  1835. default:
  1836. state = RAID_STATE_OFFLINE;
  1837. }
  1838. }
  1839. raid_set_state(myrs_raid_template, dev, state);
  1840. }
  1841. static struct raid_function_template myrs_raid_functions = {
  1842. .cookie = &myrs_template,
  1843. .is_raid = myrs_is_raid,
  1844. .get_resync = myrs_get_resync,
  1845. .get_state = myrs_get_state,
  1846. };
  1847. /*
  1848. * PCI interface functions
  1849. */
  1850. static void myrs_flush_cache(struct myrs_hba *cs)
  1851. {
  1852. myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
  1853. }
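/*
 * Complete a SCSI command: copy any sense data, return the per-command
 * sense/DCDB/SG buffers to their pools, set residual and result, and
 * hand the command back to the midlayer.
 */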
  1854. static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
  1855. struct scsi_cmnd *scmd)
  1856. {
  1857. unsigned char status;
  1858. if (!cmd_blk)
  1859. return;
  1860. scsi_dma_unmap(scmd);
  1861. status = cmd_blk->status;
  1862. if (cmd_blk->sense) {
  1863. if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
  1864. unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
  1865. if (sense_len > cmd_blk->sense_len)
  1866. sense_len = cmd_blk->sense_len;
  1867. memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
  1868. }
  1869. dma_pool_free(cs->sense_pool, cmd_blk->sense,
  1870. cmd_blk->sense_addr);
  1871. cmd_blk->sense = NULL;
  1872. cmd_blk->sense_addr = 0;
  1873. }
  1874. if (cmd_blk->dcdb) {
  1875. dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
  1876. cmd_blk->dcdb_dma);
  1877. cmd_blk->dcdb = NULL;
  1878. cmd_blk->dcdb_dma = 0;
  1879. }
  1880. if (cmd_blk->sgl) {
  1881. dma_pool_free(cs->sg_pool, cmd_blk->sgl,
  1882. cmd_blk->sgl_addr);
  1883. cmd_blk->sgl = NULL;
  1884. cmd_blk->sgl_addr = 0;
  1885. }
  1886. if (cmd_blk->residual)
  1887. scsi_set_resid(scmd, cmd_blk->residual);
  1888. if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
  1889. status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
  1890. scmd->result = (DID_BAD_TARGET << 16);
  1891. else
  1892. scmd->result = (DID_OK << 16) | status;
  1893. scsi_done(scmd);
  1894. }
  1895. static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
  1896. {
  1897. if (!cmd_blk)
  1898. return;
  1899. if (cmd_blk->complete) {
  1900. complete(cmd_blk->complete);
  1901. cmd_blk->complete = NULL;
  1902. }
  1903. }
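/*
 * Periodic monitor: pick up new firmware events, refresh controller
 * and logical device information while background operations (init,
 * rebuild, consistency check, expansion) are active, and re-arm with
 * a short interval whenever events are still pending.
 */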
  1904. static void myrs_monitor(struct work_struct *work)
  1905. {
  1906. struct myrs_hba *cs = container_of(work, struct myrs_hba,
  1907. monitor_work.work);
  1908. struct Scsi_Host *shost = cs->host;
  1909. struct myrs_ctlr_info *info = cs->ctlr_info;
  1910. unsigned int epoch = cs->fwstat_buf->epoch;
  1911. unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
  1912. unsigned char status;
  1913. dev_dbg(&shost->shost_gendev, "monitor tick\n");
  1914. status = myrs_get_fwstatus(cs);
  1915. if (cs->needs_update) {
  1916. cs->needs_update = false;
  1917. mutex_lock(&cs->cinfo_mutex);
  1918. status = myrs_get_ctlr_info(cs);
  1919. mutex_unlock(&cs->cinfo_mutex);
  1920. }
  1921. if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
  1922. status = myrs_get_event(cs, cs->next_evseq,
  1923. cs->event_buf);
  1924. if (status == MYRS_STATUS_SUCCESS) {
  1925. myrs_log_event(cs, cs->event_buf);
  1926. cs->next_evseq++;
  1927. interval = 1;
  1928. }
  1929. }
  1930. if (time_after(jiffies, cs->secondary_monitor_time
  1931. + MYRS_SECONDARY_MONITOR_INTERVAL))
  1932. cs->secondary_monitor_time = jiffies;
  1933. if (info->bg_init_active +
  1934. info->ldev_init_active +
  1935. info->pdev_init_active +
  1936. info->cc_active +
  1937. info->rbld_active +
  1938. info->exp_active != 0) {
  1939. struct scsi_device *sdev;
  1940. shost_for_each_device(sdev, shost) {
  1941. struct myrs_ldev_info *ldev_info;
  1942. int ldev_num;
  1943. if (sdev->channel < info->physchan_present)
  1944. continue;
  1945. ldev_info = sdev->hostdata;
  1946. if (!ldev_info)
  1947. continue;
  1948. ldev_num = ldev_info->ldev_num;
  1949. myrs_get_ldev_info(cs, ldev_num, ldev_info);
  1950. }
  1951. cs->needs_update = true;
  1952. }
  1953. if (epoch == cs->epoch &&
  1954. cs->fwstat_buf->next_evseq == cs->next_evseq &&
1955. (!cs->needs_update ||
  1956. time_before(jiffies, cs->primary_monitor_time
  1957. + MYRS_PRIMARY_MONITOR_INTERVAL))) {
  1958. interval = MYRS_SECONDARY_MONITOR_INTERVAL;
  1959. }
  1960. if (interval > 1)
  1961. cs->primary_monitor_time = jiffies;
  1962. queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
  1963. }
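/*
 * Create the DMA pools for scatter/gather lists, sense data and DCDBs
 * plus the monitoring workqueue, unwinding everything on failure.
 */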
  1964. static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
  1965. {
  1966. struct Scsi_Host *shost = cs->host;
  1967. size_t elem_size, elem_align;
  1968. elem_align = sizeof(struct myrs_sge);
  1969. elem_size = shost->sg_tablesize * elem_align;
  1970. cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
  1971. elem_size, elem_align, 0);
  1972. if (cs->sg_pool == NULL) {
  1973. shost_printk(KERN_ERR, shost,
  1974. "Failed to allocate SG pool\n");
  1975. return false;
  1976. }
  1977. cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
  1978. MYRS_SENSE_SIZE, sizeof(int), 0);
  1979. if (cs->sense_pool == NULL) {
  1980. dma_pool_destroy(cs->sg_pool);
  1981. cs->sg_pool = NULL;
  1982. shost_printk(KERN_ERR, shost,
  1983. "Failed to allocate sense data pool\n");
  1984. return false;
  1985. }
  1986. cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
  1987. MYRS_DCDB_SIZE,
  1988. sizeof(unsigned char), 0);
  1989. if (!cs->dcdb_pool) {
  1990. dma_pool_destroy(cs->sg_pool);
  1991. cs->sg_pool = NULL;
  1992. dma_pool_destroy(cs->sense_pool);
  1993. cs->sense_pool = NULL;
  1994. shost_printk(KERN_ERR, shost,
  1995. "Failed to allocate DCDB pool\n");
  1996. return false;
  1997. }
  1998. snprintf(cs->work_q_name, sizeof(cs->work_q_name),
  1999. "myrs_wq_%d", shost->host_no);
  2000. cs->work_q = create_singlethread_workqueue(cs->work_q_name);
  2001. if (!cs->work_q) {
  2002. dma_pool_destroy(cs->dcdb_pool);
  2003. cs->dcdb_pool = NULL;
  2004. dma_pool_destroy(cs->sg_pool);
  2005. cs->sg_pool = NULL;
  2006. dma_pool_destroy(cs->sense_pool);
  2007. cs->sense_pool = NULL;
  2008. shost_printk(KERN_ERR, shost,
  2009. "Failed to create workqueue\n");
  2010. return false;
  2011. }
  2012. /* Initialize the Monitoring Timer. */
  2013. INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
  2014. queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
  2015. return true;
  2016. }
  2017. static void myrs_destroy_mempools(struct myrs_hba *cs)
  2018. {
  2019. cancel_delayed_work_sync(&cs->monitor_work);
  2020. destroy_workqueue(cs->work_q);
  2021. dma_pool_destroy(cs->sg_pool);
  2022. dma_pool_destroy(cs->dcdb_pool);
  2023. dma_pool_destroy(cs->sense_pool);
  2024. }
  2025. static void myrs_unmap(struct myrs_hba *cs)
  2026. {
  2027. kfree(cs->event_buf);
  2028. kfree(cs->ctlr_info);
  2029. if (cs->fwstat_buf) {
  2030. dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
  2031. cs->fwstat_buf, cs->fwstat_addr);
  2032. cs->fwstat_buf = NULL;
  2033. }
  2034. if (cs->first_stat_mbox) {
  2035. dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
  2036. cs->first_stat_mbox, cs->stat_mbox_addr);
  2037. cs->first_stat_mbox = NULL;
  2038. }
  2039. if (cs->first_cmd_mbox) {
  2040. dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
  2041. cs->first_cmd_mbox, cs->cmd_mbox_addr);
  2042. cs->first_cmd_mbox = NULL;
  2043. }
  2044. }
  2045. static void myrs_cleanup(struct myrs_hba *cs)
  2046. {
  2047. struct pci_dev *pdev = cs->pdev;
  2048. /* Free the memory mailbox, status, and related structures */
  2049. myrs_unmap(cs);
  2050. if (cs->mmio_base) {
  2051. if (cs->disable_intr)
2052. cs->disable_intr(cs->io_base);
  2053. iounmap(cs->mmio_base);
  2054. cs->mmio_base = NULL;
  2055. }
  2056. if (cs->irq)
  2057. free_irq(cs->irq, cs);
  2058. if (cs->io_addr)
  2059. release_region(cs->io_addr, 0x80);
  2060. pci_set_drvdata(pdev, NULL);
  2061. pci_disable_device(pdev);
  2062. scsi_host_put(cs->host);
  2063. }
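/*
 * Detect and set up one controller: allocate the host, enable the PCI
 * device, map the register window, run the board-specific hw_init and
 * attach the shared IRQ handler.
 */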
  2064. static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
  2065. const struct pci_device_id *entry)
  2066. {
  2067. struct myrs_privdata *privdata =
  2068. (struct myrs_privdata *)entry->driver_data;
  2069. irq_handler_t irq_handler = privdata->irq_handler;
  2070. unsigned int mmio_size = privdata->mmio_size;
  2071. struct myrs_hba *cs = NULL;
  2072. cs = myrs_alloc_host(pdev, entry);
  2073. if (!cs) {
  2074. dev_err(&pdev->dev, "Unable to allocate Controller\n");
  2075. return NULL;
  2076. }
  2077. cs->pdev = pdev;
  2078. if (pci_enable_device(pdev))
  2079. goto Failure;
  2080. cs->pci_addr = pci_resource_start(pdev, 0);
  2081. pci_set_drvdata(pdev, cs);
  2082. spin_lock_init(&cs->queue_lock);
  2083. /* Map the Controller Register Window. */
  2084. if (mmio_size < PAGE_SIZE)
  2085. mmio_size = PAGE_SIZE;
  2086. cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
  2087. if (cs->mmio_base == NULL) {
  2088. dev_err(&pdev->dev,
  2089. "Unable to map Controller Register Window\n");
  2090. goto Failure;
  2091. }
  2092. cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
  2093. if (privdata->hw_init(pdev, cs, cs->io_base))
  2094. goto Failure;
  2095. /* Acquire shared access to the IRQ Channel. */
  2096. if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
  2097. dev_err(&pdev->dev,
  2098. "Unable to acquire IRQ Channel %d\n", pdev->irq);
  2099. goto Failure;
  2100. }
  2101. cs->irq = pdev->irq;
  2102. return cs;
  2103. Failure:
  2104. dev_err(&pdev->dev,
  2105. "Failed to initialize Controller\n");
  2106. myrs_cleanup(cs);
  2107. return NULL;
  2108. }
  2109. /*
  2110. * myrs_err_status reports Controller BIOS Messages passed through
  2111. * the Error Status Register when the driver performs the BIOS handshaking.
  2112. * It returns true for fatal errors and false otherwise.
  2113. */
  2114. static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
  2115. unsigned char parm0, unsigned char parm1)
  2116. {
  2117. struct pci_dev *pdev = cs->pdev;
  2118. switch (status) {
  2119. case 0x00:
  2120. dev_info(&pdev->dev,
  2121. "Physical Device %d:%d Not Responding\n",
  2122. parm1, parm0);
  2123. break;
  2124. case 0x08:
  2125. dev_notice(&pdev->dev, "Spinning Up Drives\n");
  2126. break;
  2127. case 0x30:
  2128. dev_notice(&pdev->dev, "Configuration Checksum Error\n");
  2129. break;
  2130. case 0x60:
  2131. dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
  2132. break;
  2133. case 0x70:
  2134. dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
  2135. break;
  2136. case 0x90:
  2137. dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
  2138. parm1, parm0);
  2139. break;
  2140. case 0xA0:
  2141. dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
  2142. break;
  2143. case 0xB0:
  2144. dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
  2145. break;
  2146. case 0xD0:
  2147. dev_notice(&pdev->dev, "New Controller Configuration Found\n");
  2148. break;
  2149. case 0xF0:
  2150. dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
  2151. return true;
  2152. default:
  2153. dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
  2154. status);
  2155. return true;
  2156. }
  2157. return false;
  2158. }
  2159. /*
  2160. * Hardware-specific functions
  2161. */
  2162. /*
  2163. * DAC960 GEM Series Controllers.
  2164. */
  2165. static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
  2166. {
  2167. __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
  2168. writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
  2169. }
  2170. static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
  2171. {
  2172. __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
  2173. writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
  2174. }
  2175. static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
  2176. {
  2177. __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
  2178. writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
  2179. }
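/*
 * Note: unlike the BA and LP variants below, which use a dedicated
 * MMBOX doorbell bit, GEM signals memory mailbox submissions with the
 * same HWMBOX_NEW_CMD bit used for hardware mailbox commands. This
 * mirrors the original DAC960 driver and is assumed intentional.
 */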
  2180. static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
  2181. {
  2182. __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
  2183. writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
  2184. }
  2185. static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
  2186. {
  2187. __le32 val;
  2188. val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
  2189. return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
  2190. }
  2191. static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
  2192. {
  2193. __le32 val;
  2194. val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
  2195. return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
  2196. }
  2197. static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
  2198. {
  2199. __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
  2200. writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
  2201. }
  2202. static inline void DAC960_GEM_ack_intr(void __iomem *base)
  2203. {
  2204. __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
  2205. DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
  2206. writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
  2207. }
  2208. static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
  2209. {
  2210. __le32 val;
  2211. val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
  2212. return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
  2213. }
  2214. static inline void DAC960_GEM_enable_intr(void __iomem *base)
  2215. {
  2216. __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
  2217. DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
  2218. writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
  2219. }
  2220. static inline void DAC960_GEM_disable_intr(void __iomem *base)
  2221. {
  2222. __le32 val = 0;
  2223. writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
  2224. }
  2225. static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
  2226. union myrs_cmd_mbox *mbox)
  2227. {
  2228. memcpy(&mem_mbox->words[1], &mbox->words[1],
  2229. sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
  2230. /* Barrier to avoid reordering */
  2231. wmb();
  2232. mem_mbox->words[0] = mbox->words[0];
  2233. /* Barrier to force PCI access */
  2234. mb();
  2235. }
  2236. static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
  2237. dma_addr_t cmd_mbox_addr)
  2238. {
  2239. dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
  2240. }
  2241. static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
  2242. {
  2243. return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
  2244. }
  2245. static inline bool
  2246. DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
  2247. unsigned char *param0, unsigned char *param1)
  2248. {
  2249. __le32 val;
  2250. val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
  2251. if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
  2252. return false;
2253. *error = (le32_to_cpu(val) >> 24) & ~DAC960_GEM_ERRSTS_PENDING;
  2254. *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
  2255. *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
  2256. writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
  2257. return true;
  2258. }
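/*
 * Hardware mailbox handshake used during initialization: wait for the
 * mailbox to drain, post the command, busy-wait for the status, then
 * acknowledge both the interrupt and the status.
 */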
  2259. static inline unsigned char
  2260. DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
  2261. {
  2262. unsigned char status;
  2263. while (DAC960_GEM_hw_mbox_is_full(base))
  2264. udelay(1);
  2265. DAC960_GEM_write_hw_mbox(base, mbox_addr);
  2266. DAC960_GEM_hw_mbox_new_cmd(base);
  2267. while (!DAC960_GEM_hw_mbox_status_available(base))
  2268. udelay(1);
  2269. status = DAC960_GEM_read_cmd_status(base);
  2270. DAC960_GEM_ack_hw_mbox_intr(base);
  2271. DAC960_GEM_ack_hw_mbox_status(base);
  2272. return status;
  2273. }
  2274. static int DAC960_GEM_hw_init(struct pci_dev *pdev,
  2275. struct myrs_hba *cs, void __iomem *base)
  2276. {
  2277. int timeout = 0;
  2278. unsigned char status, parm0, parm1;
  2279. DAC960_GEM_disable_intr(base);
  2280. DAC960_GEM_ack_hw_mbox_status(base);
  2281. udelay(1000);
  2282. while (DAC960_GEM_init_in_progress(base) &&
  2283. timeout < MYRS_MAILBOX_TIMEOUT) {
  2284. if (DAC960_GEM_read_error_status(base, &status,
  2285. &parm0, &parm1) &&
  2286. myrs_err_status(cs, status, parm0, parm1))
  2287. return -EIO;
  2288. udelay(10);
  2289. timeout++;
  2290. }
  2291. if (timeout == MYRS_MAILBOX_TIMEOUT) {
  2292. dev_err(&pdev->dev,
  2293. "Timeout waiting for Controller Initialisation\n");
  2294. return -ETIMEDOUT;
  2295. }
  2296. if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
  2297. dev_err(&pdev->dev,
  2298. "Unable to Enable Memory Mailbox Interface\n");
  2299. DAC960_GEM_reset_ctrl(base);
  2300. return -EAGAIN;
  2301. }
  2302. DAC960_GEM_enable_intr(base);
  2303. cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
  2304. cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
  2305. cs->disable_intr = DAC960_GEM_disable_intr;
  2306. cs->reset = DAC960_GEM_reset_ctrl;
  2307. return 0;
  2308. }
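/*
 * Interrupt handler: acknowledge the doorbell, then drain the status
 * mailbox ring. IDs below 3 are the internal DCMD/MCMD tags; anything
 * else maps back to a SCSI command tag (queued as rq->tag + 3).
 */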
  2309. static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
  2310. {
  2311. struct myrs_hba *cs = arg;
  2312. void __iomem *base = cs->io_base;
  2313. struct myrs_stat_mbox *next_stat_mbox;
  2314. unsigned long flags;
  2315. spin_lock_irqsave(&cs->queue_lock, flags);
  2316. DAC960_GEM_ack_intr(base);
  2317. next_stat_mbox = cs->next_stat_mbox;
  2318. while (next_stat_mbox->id > 0) {
  2319. unsigned short id = next_stat_mbox->id;
  2320. struct scsi_cmnd *scmd = NULL;
  2321. struct myrs_cmdblk *cmd_blk = NULL;
  2322. if (id == MYRS_DCMD_TAG)
  2323. cmd_blk = &cs->dcmd_blk;
  2324. else if (id == MYRS_MCMD_TAG)
  2325. cmd_blk = &cs->mcmd_blk;
  2326. else {
  2327. scmd = scsi_host_find_tag(cs->host, id - 3);
  2328. if (scmd)
  2329. cmd_blk = scsi_cmd_priv(scmd);
  2330. }
  2331. if (cmd_blk) {
  2332. cmd_blk->status = next_stat_mbox->status;
  2333. cmd_blk->sense_len = next_stat_mbox->sense_len;
  2334. cmd_blk->residual = next_stat_mbox->residual;
  2335. } else
  2336. dev_err(&cs->pdev->dev,
  2337. "Unhandled command completion %d\n", id);
  2338. memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
  2339. if (++next_stat_mbox > cs->last_stat_mbox)
  2340. next_stat_mbox = cs->first_stat_mbox;
  2341. if (cmd_blk) {
  2342. if (id < 3)
  2343. myrs_handle_cmdblk(cs, cmd_blk);
  2344. else
  2345. myrs_handle_scsi(cs, cmd_blk, scmd);
  2346. }
  2347. }
  2348. cs->next_stat_mbox = next_stat_mbox;
  2349. spin_unlock_irqrestore(&cs->queue_lock, flags);
  2350. return IRQ_HANDLED;
  2351. }
  2352. static struct myrs_privdata DAC960_GEM_privdata = {
  2353. .hw_init = DAC960_GEM_hw_init,
  2354. .irq_handler = DAC960_GEM_intr_handler,
  2355. .mmio_size = DAC960_GEM_mmio_size,
  2356. };
  2357. /*
  2358. * DAC960 BA Series Controllers.
  2359. */
  2360. static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
  2361. {
  2362. writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
  2363. }
  2364. static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
  2365. {
  2366. writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
  2367. }
  2368. static inline void DAC960_BA_reset_ctrl(void __iomem *base)
  2369. {
  2370. writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
  2371. }
  2372. static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
  2373. {
  2374. writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
  2375. }
  2376. static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
  2377. {
  2378. u8 val;
  2379. val = readb(base + DAC960_BA_IDB_OFFSET);
  2380. return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
  2381. }
  2382. static inline bool DAC960_BA_init_in_progress(void __iomem *base)
  2383. {
  2384. u8 val;
  2385. val = readb(base + DAC960_BA_IDB_OFFSET);
  2386. return !(val & DAC960_BA_IDB_INIT_DONE);
  2387. }
  2388. static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
  2389. {
  2390. writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
  2391. }
  2392. static inline void DAC960_BA_ack_intr(void __iomem *base)
  2393. {
  2394. writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
  2395. base + DAC960_BA_ODB_OFFSET);
  2396. }
  2397. static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
  2398. {
  2399. u8 val;
  2400. val = readb(base + DAC960_BA_ODB_OFFSET);
  2401. return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
  2402. }
  2403. static inline void DAC960_BA_enable_intr(void __iomem *base)
  2404. {
  2405. writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
  2406. }
  2407. static inline void DAC960_BA_disable_intr(void __iomem *base)
  2408. {
  2409. writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
  2410. }
  2411. static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
  2412. union myrs_cmd_mbox *mbox)
  2413. {
  2414. memcpy(&mem_mbox->words[1], &mbox->words[1],
  2415. sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
  2416. /* Barrier to avoid reordering */
  2417. wmb();
  2418. mem_mbox->words[0] = mbox->words[0];
  2419. /* Barrier to force PCI access */
  2420. mb();
  2421. }
  2422. static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
  2423. dma_addr_t cmd_mbox_addr)
  2424. {
  2425. dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
  2426. }
  2427. static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
  2428. {
  2429. return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
  2430. }
  2431. static inline bool
  2432. DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
  2433. unsigned char *param0, unsigned char *param1)
  2434. {
  2435. u8 val;
  2436. val = readb(base + DAC960_BA_ERRSTS_OFFSET);
  2437. if (!(val & DAC960_BA_ERRSTS_PENDING))
  2438. return false;
  2439. val &= ~DAC960_BA_ERRSTS_PENDING;
  2440. *error = val;
  2441. *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
  2442. *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
  2443. writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
  2444. return true;
  2445. }
  2446. static inline unsigned char
  2447. DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
  2448. {
  2449. unsigned char status;
  2450. while (DAC960_BA_hw_mbox_is_full(base))
  2451. udelay(1);
  2452. DAC960_BA_write_hw_mbox(base, mbox_addr);
  2453. DAC960_BA_hw_mbox_new_cmd(base);
  2454. while (!DAC960_BA_hw_mbox_status_available(base))
  2455. udelay(1);
  2456. status = DAC960_BA_read_cmd_status(base);
  2457. DAC960_BA_ack_hw_mbox_intr(base);
  2458. DAC960_BA_ack_hw_mbox_status(base);
  2459. return status;
  2460. }
  2461. static int DAC960_BA_hw_init(struct pci_dev *pdev,
  2462. struct myrs_hba *cs, void __iomem *base)
  2463. {
  2464. int timeout = 0;
  2465. unsigned char status, parm0, parm1;
  2466. DAC960_BA_disable_intr(base);
  2467. DAC960_BA_ack_hw_mbox_status(base);
  2468. udelay(1000);
  2469. while (DAC960_BA_init_in_progress(base) &&
  2470. timeout < MYRS_MAILBOX_TIMEOUT) {
  2471. if (DAC960_BA_read_error_status(base, &status,
  2472. &parm0, &parm1) &&
  2473. myrs_err_status(cs, status, parm0, parm1))
  2474. return -EIO;
  2475. udelay(10);
  2476. timeout++;
  2477. }
  2478. if (timeout == MYRS_MAILBOX_TIMEOUT) {
  2479. dev_err(&pdev->dev,
  2480. "Timeout waiting for Controller Initialisation\n");
  2481. return -ETIMEDOUT;
  2482. }
  2483. if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
  2484. dev_err(&pdev->dev,
  2485. "Unable to Enable Memory Mailbox Interface\n");
  2486. DAC960_BA_reset_ctrl(base);
  2487. return -EAGAIN;
  2488. }
  2489. DAC960_BA_enable_intr(base);
  2490. cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
  2491. cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
  2492. cs->disable_intr = DAC960_BA_disable_intr;
  2493. cs->reset = DAC960_BA_reset_ctrl;
  2494. return 0;
  2495. }
  2496. static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
  2497. {
  2498. struct myrs_hba *cs = arg;
  2499. void __iomem *base = cs->io_base;
  2500. struct myrs_stat_mbox *next_stat_mbox;
  2501. unsigned long flags;
  2502. spin_lock_irqsave(&cs->queue_lock, flags);
  2503. DAC960_BA_ack_intr(base);
  2504. next_stat_mbox = cs->next_stat_mbox;
  2505. while (next_stat_mbox->id > 0) {
  2506. unsigned short id = next_stat_mbox->id;
  2507. struct scsi_cmnd *scmd = NULL;
  2508. struct myrs_cmdblk *cmd_blk = NULL;
  2509. if (id == MYRS_DCMD_TAG)
  2510. cmd_blk = &cs->dcmd_blk;
  2511. else if (id == MYRS_MCMD_TAG)
  2512. cmd_blk = &cs->mcmd_blk;
  2513. else {
  2514. scmd = scsi_host_find_tag(cs->host, id - 3);
  2515. if (scmd)
  2516. cmd_blk = scsi_cmd_priv(scmd);
  2517. }
  2518. if (cmd_blk) {
  2519. cmd_blk->status = next_stat_mbox->status;
  2520. cmd_blk->sense_len = next_stat_mbox->sense_len;
  2521. cmd_blk->residual = next_stat_mbox->residual;
  2522. } else
  2523. dev_err(&cs->pdev->dev,
  2524. "Unhandled command completion %d\n", id);
  2525. memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
  2526. if (++next_stat_mbox > cs->last_stat_mbox)
  2527. next_stat_mbox = cs->first_stat_mbox;
  2528. if (cmd_blk) {
  2529. if (id < 3)
  2530. myrs_handle_cmdblk(cs, cmd_blk);
  2531. else
  2532. myrs_handle_scsi(cs, cmd_blk, scmd);
  2533. }
  2534. }
  2535. cs->next_stat_mbox = next_stat_mbox;
  2536. spin_unlock_irqrestore(&cs->queue_lock, flags);
  2537. return IRQ_HANDLED;
  2538. }
  2539. static struct myrs_privdata DAC960_BA_privdata = {
  2540. .hw_init = DAC960_BA_hw_init,
  2541. .irq_handler = DAC960_BA_intr_handler,
  2542. .mmio_size = DAC960_BA_mmio_size,
  2543. };
  2544. /*
  2545. * DAC960 LP Series Controllers.
  2546. */
  2547. static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
  2548. {
  2549. writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
  2550. }
  2551. static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
  2552. {
  2553. writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
  2554. }
  2555. static inline void DAC960_LP_reset_ctrl(void __iomem *base)
  2556. {
  2557. writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
  2558. }
  2559. static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
  2560. {
  2561. writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
  2562. }
  2563. static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
  2564. {
  2565. u8 val;
  2566. val = readb(base + DAC960_LP_IDB_OFFSET);
  2567. return val & DAC960_LP_IDB_HWMBOX_FULL;
  2568. }
  2569. static inline bool DAC960_LP_init_in_progress(void __iomem *base)
  2570. {
  2571. u8 val;
  2572. val = readb(base + DAC960_LP_IDB_OFFSET);
  2573. return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
  2574. }
  2575. static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
  2576. {
  2577. writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
  2578. }
  2579. static inline void DAC960_LP_ack_intr(void __iomem *base)
  2580. {
  2581. writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
  2582. base + DAC960_LP_ODB_OFFSET);
  2583. }
  2584. static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
  2585. {
  2586. u8 val;
  2587. val = readb(base + DAC960_LP_ODB_OFFSET);
  2588. return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
  2589. }
  2590. static inline void DAC960_LP_enable_intr(void __iomem *base)
  2591. {
  2592. writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
  2593. }
  2594. static inline void DAC960_LP_disable_intr(void __iomem *base)
  2595. {
  2596. writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
  2597. }
  2598. static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
  2599. union myrs_cmd_mbox *mbox)
  2600. {
  2601. memcpy(&mem_mbox->words[1], &mbox->words[1],
  2602. sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
  2603. /* Barrier to avoid reordering */
  2604. wmb();
  2605. mem_mbox->words[0] = mbox->words[0];
  2606. /* Barrier to force PCI access */
  2607. mb();
  2608. }
  2609. static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
  2610. dma_addr_t cmd_mbox_addr)
  2611. {
  2612. dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
  2613. }
  2614. static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
  2615. {
  2616. return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
  2617. }
  2618. static inline bool
  2619. DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
  2620. unsigned char *param0, unsigned char *param1)
  2621. {
  2622. u8 val;
  2623. val = readb(base + DAC960_LP_ERRSTS_OFFSET);
  2624. if (!(val & DAC960_LP_ERRSTS_PENDING))
  2625. return false;
  2626. val &= ~DAC960_LP_ERRSTS_PENDING;
  2627. *error = val;
  2628. *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
  2629. *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
  2630. writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
  2631. return true;
  2632. }
  2633. static inline unsigned char
  2634. DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
  2635. {
  2636. unsigned char status;
  2637. while (DAC960_LP_hw_mbox_is_full(base))
  2638. udelay(1);
  2639. DAC960_LP_write_hw_mbox(base, mbox_addr);
  2640. DAC960_LP_hw_mbox_new_cmd(base);
  2641. while (!DAC960_LP_hw_mbox_status_available(base))
  2642. udelay(1);
  2643. status = DAC960_LP_read_cmd_status(base);
  2644. DAC960_LP_ack_hw_mbox_intr(base);
  2645. DAC960_LP_ack_hw_mbox_status(base);
  2646. return status;
  2647. }
static int DAC960_LP_hw_init(struct pci_dev *pdev,
                struct myrs_hba *cs, void __iomem *base)
{
        int timeout = 0;
        unsigned char status, parm0, parm1;

        DAC960_LP_disable_intr(base);
        DAC960_LP_ack_hw_mbox_status(base);
        udelay(1000);
        while (DAC960_LP_init_in_progress(base) &&
               timeout < MYRS_MAILBOX_TIMEOUT) {
                if (DAC960_LP_read_error_status(base, &status,
                                                &parm0, &parm1) &&
                    myrs_err_status(cs, status, parm0, parm1))
                        return -EIO;
                udelay(10);
                timeout++;
        }
        if (timeout == MYRS_MAILBOX_TIMEOUT) {
                dev_err(&pdev->dev,
                        "Timeout waiting for Controller Initialisation\n");
                return -ETIMEDOUT;
        }
        if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
                dev_err(&pdev->dev,
                        "Unable to Enable Memory Mailbox Interface\n");
                DAC960_LP_reset_ctrl(base);
                return -ENODEV;
        }
        DAC960_LP_enable_intr(base);
        cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
        cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
        cs->disable_intr = DAC960_LP_disable_intr;
        cs->reset = DAC960_LP_reset_ctrl;
        return 0;
}
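
/*
 * LP interrupt handler: acknowledge the interrupt, then drain the
 * status mailbox ring. A zero ID marks an empty slot; MYRS_DCMD_TAG
 * and MYRS_MCMD_TAG identify the driver's internal command blocks,
 * anything else is a SCSI command looked up by host tag (tag IDs
 * start at 3). Each consumed slot is zeroed so it reads as empty on
 * the next pass, and the ring pointer wraps at last_stat_mbox.
 */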
static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
{
        struct myrs_hba *cs = arg;
        void __iomem *base = cs->io_base;
        struct myrs_stat_mbox *next_stat_mbox;
        unsigned long flags;

        spin_lock_irqsave(&cs->queue_lock, flags);
        DAC960_LP_ack_intr(base);
        next_stat_mbox = cs->next_stat_mbox;
        while (next_stat_mbox->id > 0) {
                unsigned short id = next_stat_mbox->id;
                struct scsi_cmnd *scmd = NULL;
                struct myrs_cmdblk *cmd_blk = NULL;

                if (id == MYRS_DCMD_TAG)
                        cmd_blk = &cs->dcmd_blk;
                else if (id == MYRS_MCMD_TAG)
                        cmd_blk = &cs->mcmd_blk;
                else {
                        scmd = scsi_host_find_tag(cs->host, id - 3);
                        if (scmd)
                                cmd_blk = scsi_cmd_priv(scmd);
                }
                if (cmd_blk) {
                        cmd_blk->status = next_stat_mbox->status;
                        cmd_blk->sense_len = next_stat_mbox->sense_len;
                        cmd_blk->residual = next_stat_mbox->residual;
                } else
                        dev_err(&cs->pdev->dev,
                                "Unhandled command completion %d\n", id);

                memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
                if (++next_stat_mbox > cs->last_stat_mbox)
                        next_stat_mbox = cs->first_stat_mbox;

                if (cmd_blk) {
                        if (id < 3)
                                myrs_handle_cmdblk(cs, cmd_blk);
                        else
                                myrs_handle_scsi(cs, cmd_blk, scmd);
                }
        }
        cs->next_stat_mbox = next_stat_mbox;
        spin_unlock_irqrestore(&cs->queue_lock, flags);
        return IRQ_HANDLED;
}

static struct myrs_privdata DAC960_LP_privdata = {
        .hw_init = DAC960_LP_hw_init,
        .irq_handler = DAC960_LP_intr_handler,
        .mmio_size = DAC960_LP_mmio_size,
};

/*
 * Module functions
 */
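
/*
 * Probe one controller: detect and map it, read its configuration,
 * set up the DMA mempools, and register the SCSI host. Any failure
 * after detection unwinds through myrs_cleanup().
 */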
static int
myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
        struct myrs_hba *cs;
        int ret;

        cs = myrs_detect(dev, entry);
        if (!cs)
                return -ENODEV;

        ret = myrs_get_config(cs);
        if (ret < 0) {
                myrs_cleanup(cs);
                return ret;
        }

        if (!myrs_create_mempools(dev, cs)) {
                ret = -ENOMEM;
                goto failed;
        }

        ret = scsi_add_host(cs->host, &dev->dev);
        if (ret) {
                dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
                myrs_destroy_mempools(cs);
                goto failed;
        }
        scsi_scan_host(cs->host);
        return 0;
failed:
        myrs_cleanup(cs);
        return ret;
}

static void myrs_remove(struct pci_dev *pdev)
{
        struct myrs_hba *cs = pci_get_drvdata(pdev);

        if (cs == NULL)
                return;

        shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
        myrs_flush_cache(cs);
        myrs_destroy_mempools(cs);
        myrs_cleanup(cs);
}
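
/*
 * PCI IDs for the supported controllers. The GEM entry matches the
 * Mylex subvendor ID explicitly via PCI_DEVICE_SUB, presumably
 * because the bare vendor/device pair alone does not uniquely
 * identify that board; BA and LP match on vendor/device only.
 */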
static const struct pci_device_id myrs_id_table[] = {
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
                               PCI_DEVICE_ID_MYLEX_DAC960_GEM,
                               PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
                .driver_data = (unsigned long) &DAC960_GEM_privdata,
        },
        {
                PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
        },
        {
                PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
        },
        {0, },
};

MODULE_DEVICE_TABLE(pci, myrs_id_table);

static struct pci_driver myrs_pci_driver = {
        .name = "myrs",
        .id_table = myrs_id_table,
        .probe = myrs_probe,
        .remove = myrs_remove,
};

static int __init myrs_init_module(void)
{
        int ret;

        myrs_raid_template = raid_class_attach(&myrs_raid_functions);
        if (!myrs_raid_template)
                return -ENODEV;

        ret = pci_register_driver(&myrs_pci_driver);
        if (ret)
                raid_class_release(myrs_raid_template);

        return ret;
}

static void __exit myrs_cleanup_module(void)
{
        pci_unregister_driver(&myrs_pci_driver);
        raid_class_release(myrs_raid_template);
}

module_init(myrs_init_module);
module_exit(myrs_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
MODULE_AUTHOR("Hannes Reinecke <[email protected]>");
MODULE_LICENSE("GPL");