mesh.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware)
  4. * bus adaptor found on Power Macintosh computers.
  5. * We assume the MESH is connected to a DBDMA (descriptor-based DMA)
  6. * controller.
  7. *
  8. * Paul Mackerras, August 1996.
  9. * Copyright (C) 1996 Paul Mackerras.
  10. *
  11. * Apr. 21 2002 - BenH Rework bus reset code for new error handler
  12. * Add delay after initial bus reset
  13. * Add module parameters
  14. *
  15. * Sep. 27 2003 - BenH Move to new driver model, fix some write posting
  16. * issues
  17. * To do:
  18. * - handle aborts correctly
  19. * - retry arbitration if lost (unless higher levels do this for us)
  20. * - power down the chip when no device is detected
  21. */
  22. #include <linux/module.h>
  23. #include <linux/kernel.h>
  24. #include <linux/delay.h>
  25. #include <linux/types.h>
  26. #include <linux/string.h>
  27. #include <linux/blkdev.h>
  28. #include <linux/proc_fs.h>
  29. #include <linux/stat.h>
  30. #include <linux/interrupt.h>
  31. #include <linux/reboot.h>
  32. #include <linux/spinlock.h>
  33. #include <linux/pci.h>
  34. #include <linux/pgtable.h>
  35. #include <asm/dbdma.h>
  36. #include <asm/io.h>
  37. #include <asm/prom.h>
  38. #include <asm/irq.h>
  39. #include <asm/hydra.h>
  40. #include <asm/processor.h>
  41. #include <asm/setup.h>
  42. #include <asm/pmac_feature.h>
  43. #include <asm/macio.h>
  44. #include <scsi/scsi.h>
  45. #include <scsi/scsi_cmnd.h>
  46. #include <scsi/scsi_device.h>
  47. #include <scsi/scsi_host.h>
  48. #include "mesh.h"
  49. #if 1
  50. #undef KERN_DEBUG
  51. #define KERN_DEBUG KERN_WARNING
  52. #endif
  53. MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
  54. MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
  55. MODULE_LICENSE("GPL");
  56. static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE;
  57. static int sync_targets = 0xff;
  58. static int resel_targets = 0xff;
  59. static int debug_targets = 0; /* print debug for these targets */
  60. static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS;
  61. module_param(sync_rate, int, 0);
  62. MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)");
  63. module_param(sync_targets, int, 0);
  64. MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous");
  65. module_param(resel_targets, int, 0);
  66. MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect");
  67. module_param(debug_targets, int, 0644);
  68. MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets");
  69. module_param(init_reset_delay, int, 0);
  70. MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)");
  71. static int mesh_sync_period = 100;
  72. static int mesh_sync_offset = 0;
  73. static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */
  74. #define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1)
  75. #define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1)
  76. #define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1)
  77. #define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id))
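/*
 * Illustrative note (not from the original source): the *_targets
 * parameters are per-SCSI-ID bitmasks, so e.g. loading the driver with
 * debug_targets=0x08 makes ALLOW_DEBUG() true for target 3 only.  Since
 * debug_targets is registered with mode 0644, it should also be
 * adjustable at runtime through sysfs (assuming the usual module
 * parameter path, /sys/module/mesh/parameters/debug_targets).
 */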
  78. #undef MESH_DBG
  79. #define N_DBG_LOG 50
  80. #define N_DBG_SLOG 20
  81. #define NUM_DBG_EVENTS 13
  82. #undef DBG_USE_TB /* bombs on 601 */
  83. struct dbglog {
  84. char *fmt;
  85. u32 tb;
  86. u8 phase;
  87. u8 bs0;
  88. u8 bs1;
  89. u8 tgt;
  90. int d;
  91. };
  92. enum mesh_phase {
  93. idle,
  94. arbitrating,
  95. selecting,
  96. commanding,
  97. dataing,
  98. statusing,
  99. busfreeing,
  100. disconnecting,
  101. reselecting,
  102. sleeping
  103. };
  104. enum msg_phase {
  105. msg_none,
  106. msg_out,
  107. msg_out_xxx,
  108. msg_out_last,
  109. msg_in,
  110. msg_in_bad,
  111. };
  112. enum sdtr_phase {
  113. do_sdtr,
  114. sdtr_sent,
  115. sdtr_done
  116. };
  117. struct mesh_target {
  118. enum sdtr_phase sdtr_state;
  119. int sync_params;
  120. int data_goes_out; /* guess as to data direction */
  121. struct scsi_cmnd *current_req;
  122. u32 saved_ptr;
  123. #ifdef MESH_DBG
  124. int log_ix;
  125. int n_log;
  126. struct dbglog log[N_DBG_LOG];
  127. #endif
  128. };
  129. struct mesh_state {
  130. volatile struct mesh_regs __iomem *mesh;
  131. int meshintr;
  132. volatile struct dbdma_regs __iomem *dma;
  133. int dmaintr;
  134. struct Scsi_Host *host;
  135. struct mesh_state *next;
  136. struct scsi_cmnd *request_q;
  137. struct scsi_cmnd *request_qtail;
  138. enum mesh_phase phase; /* what we're currently trying to do */
  139. enum msg_phase msgphase;
  140. int conn_tgt; /* target we're connected to */
  141. struct scsi_cmnd *current_req; /* req we're currently working on */
  142. int data_ptr;
  143. int dma_started;
  144. int dma_count;
  145. int stat;
  146. int aborting;
  147. int expect_reply;
  148. int n_msgin;
  149. u8 msgin[16];
  150. int n_msgout;
  151. int last_n_msgout;
  152. u8 msgout[16];
  153. struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */
  154. dma_addr_t dma_cmd_bus;
  155. void *dma_cmd_space;
  156. int dma_cmd_size;
  157. int clk_freq;
  158. struct mesh_target tgts[8];
  159. struct macio_dev *mdev;
  160. struct pci_dev* pdev;
  161. #ifdef MESH_DBG
  162. int log_ix;
  163. int n_log;
  164. struct dbglog log[N_DBG_SLOG];
  165. #endif
  166. };
  167. /*
  168. * Driver is too messy, we need a few prototypes...
  169. */
  170. static void mesh_done(struct mesh_state *ms, int start_next);
  171. static void mesh_interrupt(struct mesh_state *ms);
  172. static void cmd_complete(struct mesh_state *ms);
  173. static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
  174. static void halt_dma(struct mesh_state *ms);
  175. static void phase_mismatch(struct mesh_state *ms);
  176. /*
  177. * Some debugging & logging routines
  178. */
  179. #ifdef MESH_DBG
  180. static inline u32 readtb(void)
  181. {
  182. u32 tb;
  183. #ifdef DBG_USE_TB
  184. /* Beware: if you enable this, it will crash on 601s. */
  185. asm ("mftb %0" : "=r" (tb) : );
  186. #else
  187. tb = 0;
  188. #endif
  189. return tb;
  190. }
  191. static void dlog(struct mesh_state *ms, char *fmt, int a)
  192. {
  193. struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
  194. struct dbglog *tlp, *slp;
  195. tlp = &tp->log[tp->log_ix];
  196. slp = &ms->log[ms->log_ix];
  197. tlp->fmt = fmt;
  198. tlp->tb = readtb();
  199. tlp->phase = (ms->msgphase << 4) + ms->phase;
  200. tlp->bs0 = ms->mesh->bus_status0;
  201. tlp->bs1 = ms->mesh->bus_status1;
  202. tlp->tgt = ms->conn_tgt;
  203. tlp->d = a;
  204. *slp = *tlp;
  205. if (++tp->log_ix >= N_DBG_LOG)
  206. tp->log_ix = 0;
  207. if (tp->n_log < N_DBG_LOG)
  208. ++tp->n_log;
  209. if (++ms->log_ix >= N_DBG_SLOG)
  210. ms->log_ix = 0;
  211. if (ms->n_log < N_DBG_SLOG)
  212. ++ms->n_log;
  213. }
  214. static void dumplog(struct mesh_state *ms, int t)
  215. {
  216. struct mesh_target *tp = &ms->tgts[t];
  217. struct dbglog *lp;
  218. int i;
  219. if (tp->n_log == 0)
  220. return;
  221. i = tp->log_ix - tp->n_log;
  222. if (i < 0)
  223. i += N_DBG_LOG;
  224. tp->n_log = 0;
  225. do {
  226. lp = &tp->log[i];
  227. printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ",
  228. t, lp->bs1, lp->bs0, lp->phase);
  229. #ifdef DBG_USE_TB
  230. printk("tb=%10u ", lp->tb);
  231. #endif
  232. printk(lp->fmt, lp->d);
  233. printk("\n");
  234. if (++i >= N_DBG_LOG)
  235. i = 0;
  236. } while (i != tp->log_ix);
  237. }
  238. static void dumpslog(struct mesh_state *ms)
  239. {
  240. struct dbglog *lp;
  241. int i;
  242. if (ms->n_log == 0)
  243. return;
  244. i = ms->log_ix - ms->n_log;
  245. if (i < 0)
  246. i += N_DBG_SLOG;
  247. ms->n_log = 0;
  248. do {
  249. lp = &ms->log[i];
  250. printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ",
  251. lp->bs1, lp->bs0, lp->phase, lp->tgt);
  252. #ifdef DBG_USE_TB
  253. printk("tb=%10u ", lp->tb);
  254. #endif
  255. printk(lp->fmt, lp->d);
  256. printk("\n");
  257. if (++i >= N_DBG_SLOG)
  258. i = 0;
  259. } while (i != ms->log_ix);
  260. }
  261. #else
  262. static inline void dlog(struct mesh_state *ms, char *fmt, int a)
  263. {}
  264. static inline void dumplog(struct mesh_state *ms, int tgt)
  265. {}
  266. static inline void dumpslog(struct mesh_state *ms)
  267. {}
  268. #endif /* MESH_DBG */
  269. #define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
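/*
 * For reference: MKWORD(0x12, 0x34, 0x56, 0x78) == 0x12345678; the
 * dlog() callers below use it to pack four byte-wide register values
 * into a single logged word.
 */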
  270. static void
  271. mesh_dump_regs(struct mesh_state *ms)
  272. {
  273. volatile struct mesh_regs __iomem *mr = ms->mesh;
  274. volatile struct dbdma_regs __iomem *md = ms->dma;
  275. int t;
  276. struct mesh_target *tp;
  277. printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n",
  278. ms, mr, md);
  279. printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x "
  280. "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
  281. (mr->count_hi << 8) + mr->count_lo, mr->sequence,
  282. (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count,
  283. mr->exception, mr->error, mr->intr_mask, mr->interrupt,
  284. mr->sync_params);
  285. while(in_8(&mr->fifo_count))
  286. printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo));
  287. printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n",
  288. in_le32(&md->status), in_le32(&md->cmdptr));
  289. printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n",
  290. ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
  291. printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n",
  292. ms->dma_started, ms->dma_count, ms->n_msgout);
  293. for (t = 0; t < 8; ++t) {
  294. tp = &ms->tgts[t];
  295. if (tp->current_req == NULL)
  296. continue;
  297. printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n",
  298. t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
  299. }
  300. }
  301. /*
  302. * Flush write buffers on the bus path to the mesh
  303. */
  304. static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
  305. {
  306. (void)in_8(&mr->mesh_id);
  307. }
  308. /* Called with the mesh interrupt disabled; initialize the chipset
  309. * and, if requested, do the initial bus reset. The lock must not be
  310. * held since we can schedule.
  311. */
  312. static void mesh_init(struct mesh_state *ms)
  313. {
  314. volatile struct mesh_regs __iomem *mr = ms->mesh;
  315. volatile struct dbdma_regs __iomem *md = ms->dma;
  316. mesh_flush_io(mr);
  317. udelay(100);
  318. /* Reset controller */
  319. out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
  320. out_8(&mr->exception, 0xff); /* clear all exception bits */
  321. out_8(&mr->error, 0xff); /* clear all error bits */
  322. out_8(&mr->sequence, SEQ_RESETMESH);
  323. mesh_flush_io(mr);
  324. udelay(10);
  325. out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  326. out_8(&mr->source_id, ms->host->this_id);
  327. out_8(&mr->sel_timeout, 25); /* 250ms */
  328. out_8(&mr->sync_params, ASYNC_PARAMS);
  329. if (init_reset_delay) {
  330. printk(KERN_INFO "mesh: performing initial bus reset...\n");
  331. /* Reset bus */
  332. out_8(&mr->bus_status1, BS1_RST); /* assert RST */
  333. mesh_flush_io(mr);
  334. udelay(30); /* leave it on for >= 25us */
  335. out_8(&mr->bus_status1, 0); /* negate RST */
  336. mesh_flush_io(mr);
  337. /* Wait for bus to come back */
  338. msleep(init_reset_delay);
  339. }
  340. /* Reconfigure controller */
  341. out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */
  342. out_8(&mr->sequence, SEQ_FLUSHFIFO);
  343. mesh_flush_io(mr);
  344. udelay(1);
  345. out_8(&mr->sync_params, ASYNC_PARAMS);
  346. out_8(&mr->sequence, SEQ_ENBRESEL);
  347. ms->phase = idle;
  348. ms->msgphase = msg_none;
  349. }
  350. static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
  351. {
  352. volatile struct mesh_regs __iomem *mr = ms->mesh;
  353. int t, id;
  354. id = cmd->device->id;
  355. ms->current_req = cmd;
  356. ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
  357. ms->tgts[id].current_req = cmd;
  358. #if 1
  359. if (DEBUG_TARGET(cmd)) {
  360. int i;
  361. printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
  362. for (i = 0; i < cmd->cmd_len; ++i)
  363. printk(" %x", cmd->cmnd[i]);
  364. printk(" use_sg=%d buffer=%p bufflen=%u\n",
  365. scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
  366. }
  367. #endif
  368. if (ms->dma_started)
  369. panic("mesh: double DMA start !\n");
  370. ms->phase = arbitrating;
  371. ms->msgphase = msg_none;
  372. ms->data_ptr = 0;
  373. ms->dma_started = 0;
  374. ms->n_msgout = 0;
  375. ms->last_n_msgout = 0;
  376. ms->expect_reply = 0;
  377. ms->conn_tgt = id;
  378. ms->tgts[id].saved_ptr = 0;
  379. ms->stat = DID_OK;
  380. ms->aborting = 0;
  381. #ifdef MESH_DBG
  382. ms->tgts[id].n_log = 0;
  383. dlog(ms, "start cmd=%x", (int) cmd);
  384. #endif
  385. /* Off we go */
  386. dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
  387. MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
  388. out_8(&mr->interrupt, INT_CMDDONE);
  389. out_8(&mr->sequence, SEQ_ENBRESEL);
  390. mesh_flush_io(mr);
  391. udelay(1);
  392. if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
  393. /*
  394. * Some other device has the bus or is arbitrating for it -
  395. * probably a target which is about to reselect us.
  396. */
  397. dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
  398. MKWORD(mr->interrupt, mr->exception,
  399. mr->error, mr->fifo_count));
  400. for (t = 100; t > 0; --t) {
  401. if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0)
  402. break;
  403. if (in_8(&mr->interrupt) != 0) {
  404. dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
  405. MKWORD(mr->interrupt, mr->exception,
  406. mr->error, mr->fifo_count));
  407. mesh_interrupt(ms);
  408. if (ms->phase != arbitrating)
  409. return;
  410. }
  411. udelay(1);
  412. }
  413. if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
  414. /* XXX should try again in a little while */
  415. ms->stat = DID_BUS_BUSY;
  416. ms->phase = idle;
  417. mesh_done(ms, 0);
  418. return;
  419. }
  420. }
  421. /*
  422. * Apparently the mesh has a bug where it will assert both its
  423. * own bit and the target's bit on the bus during arbitration.
  424. */
  425. out_8(&mr->dest_id, mr->source_id);
  426. /*
  427. * There appears to be a race with reselection sometimes,
  428. * where a target reselects us just as we issue the
  429. * arbitrate command. It seems that then the arbitrate
  430. * command just hangs waiting for the bus to be free
  431. * without giving us a reselection exception.
  432. * The only way I have found to get it to respond correctly
  433. * is this: disable reselection before issuing the arbitrate
  434. * command, then after issuing it, if it looks like a target
  435. * is trying to reselect us, reset the mesh and then enable
  436. * reselection.
  437. */
  438. out_8(&mr->sequence, SEQ_DISRESEL);
  439. if (in_8(&mr->interrupt) != 0) {
  440. dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
  441. MKWORD(mr->interrupt, mr->exception,
  442. mr->error, mr->fifo_count));
  443. mesh_interrupt(ms);
  444. if (ms->phase != arbitrating)
  445. return;
  446. dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
  447. MKWORD(mr->interrupt, mr->exception,
  448. mr->error, mr->fifo_count));
  449. }
  450. out_8(&mr->sequence, SEQ_ARBITRATE);
  451. for (t = 230; t > 0; --t) {
  452. if (in_8(&mr->interrupt) != 0)
  453. break;
  454. udelay(1);
  455. }
  456. dlog(ms, "after arb, intr/exc/err/fc=%.8x",
  457. MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
  458. if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
  459. && (in_8(&mr->bus_status0) & BS0_IO)) {
  460. /* looks like a reselection - try resetting the mesh */
  461. dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
  462. MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
  463. out_8(&mr->sequence, SEQ_RESETMESH);
  464. mesh_flush_io(mr);
  465. udelay(10);
  466. out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  467. out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  468. out_8(&mr->sequence, SEQ_ENBRESEL);
  469. mesh_flush_io(mr);
  470. for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
  471. udelay(1);
  472. dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
  473. MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
  474. #ifndef MESH_MULTIPLE_HOSTS
  475. if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
  476. && (in_8(&mr->bus_status0) & BS0_IO)) {
  477. printk(KERN_ERR "mesh: controller not responding"
  478. " to reselection!\n");
  479. /*
  480. * If this is a target reselecting us, and the
  481. * mesh isn't responding, the higher levels of
  482. * the scsi code will eventually time out and
  483. * reset the bus.
  484. */
  485. }
  486. #endif
  487. }
  488. }
  489. /*
  490. * Start the next command for a MESH.
  491. * Should be called with interrupts disabled.
  492. */
  493. static void mesh_start(struct mesh_state *ms)
  494. {
  495. struct scsi_cmnd *cmd, *prev, *next;
  496. if (ms->phase != idle || ms->current_req != NULL) {
  497. printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
  498. ms->phase, ms);
  499. return;
  500. }
  501. while (ms->phase == idle) {
  502. prev = NULL;
  503. for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
  504. if (cmd == NULL)
  505. return;
  506. if (ms->tgts[cmd->device->id].current_req == NULL)
  507. break;
  508. prev = cmd;
  509. }
  510. next = (struct scsi_cmnd *) cmd->host_scribble;
  511. if (prev == NULL)
  512. ms->request_q = next;
  513. else
  514. prev->host_scribble = (void *) next;
  515. if (next == NULL)
  516. ms->request_qtail = prev;
  517. mesh_start_cmd(ms, cmd);
  518. }
  519. }
  520. static void mesh_done(struct mesh_state *ms, int start_next)
  521. {
  522. struct scsi_cmnd *cmd;
  523. struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
  524. cmd = ms->current_req;
  525. ms->current_req = NULL;
  526. tp->current_req = NULL;
  527. if (cmd) {
  528. struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
  529. set_host_byte(cmd, ms->stat);
  530. set_status_byte(cmd, mcmd->status);
  531. if (ms->stat == DID_OK)
  532. scsi_msg_to_host_byte(cmd, mcmd->message);
  533. if (DEBUG_TARGET(cmd)) {
  534. printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
  535. cmd->result, ms->data_ptr, scsi_bufflen(cmd));
  536. #if 0
  537. /* needs to use sg? */
  538. if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
  539. && cmd->request_buffer != 0) {
  540. unsigned char *b = cmd->request_buffer;
  541. printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
  542. b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
  543. }
  544. #endif
  545. }
  546. mcmd->this_residual -= ms->data_ptr;
  547. scsi_done(cmd);
  548. }
  549. if (start_next) {
  550. out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
  551. mesh_flush_io(ms->mesh);
  552. udelay(1);
  553. ms->phase = idle;
  554. mesh_start(ms);
  555. }
  556. }
  557. static inline void add_sdtr_msg(struct mesh_state *ms)
  558. {
  559. int i = ms->n_msgout;
  560. ms->msgout[i] = EXTENDED_MESSAGE;
  561. ms->msgout[i+1] = 3;
  562. ms->msgout[i+2] = EXTENDED_SDTR;
  563. ms->msgout[i+3] = mesh_sync_period/4;
  564. ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
  565. ms->n_msgout = i + 5;
  566. }
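/*
 * Example (hypothetical values): with mesh_sync_period = 100 and
 * mesh_sync_offset = 15, and ALLOW_SYNC() true for the target, the five
 * bytes appended above would be { EXTENDED_MESSAGE, 3, EXTENDED_SDTR,
 * 25, 15 }, i.e. a transfer period factor of 25 (25 * 4 ns = 100 ns)
 * and a REQ/ACK offset of 15.
 */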
  567. static void set_sdtr(struct mesh_state *ms, int period, int offset)
  568. {
  569. struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
  570. volatile struct mesh_regs __iomem *mr = ms->mesh;
  571. int v, tr;
  572. tp->sdtr_state = sdtr_done;
  573. if (offset == 0) {
  574. /* asynchronous */
  575. if (SYNC_OFF(tp->sync_params))
  576. printk(KERN_INFO "mesh: target %d now asynchronous\n",
  577. ms->conn_tgt);
  578. tp->sync_params = ASYNC_PARAMS;
  579. out_8(&mr->sync_params, ASYNC_PARAMS);
  580. return;
  581. }
  582. /*
  583. * We need to compute ceil(clk_freq * period / 500e6) - 2
  584. * without incurring overflow.
  585. */
  586. v = (ms->clk_freq / 5000) * period;
  587. if (v <= 250000) {
  588. /* special case: sync_period == 5 * clk_period */
  589. v = 0;
  590. /* units of tr are 100kB/s */
  591. tr = (ms->clk_freq + 250000) / 500000;
  592. } else {
  593. /* sync_period == (v + 2) * 2 * clk_period */
  594. v = (v + 99999) / 100000 - 2;
  595. if (v > 15)
  596. v = 15; /* oops */
  597. tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
  598. }
  599. if (offset > 15)
  600. offset = 15; /* can't happen */
  601. tp->sync_params = SYNC_PARAMS(offset, v);
  602. out_8(&mr->sync_params, tp->sync_params);
  603. printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n",
  604. ms->conn_tgt, tr/10, tr%10);
  605. }
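/*
 * Worked example (assuming a 50 MHz MESH clock, i.e. clk_freq =
 * 50000000): for an agreed period factor of 25 (100 ns),
 * v = (50000000 / 5000) * 25 = 250000, which takes the special-case
 * branch above, so v = 0 and tr = (50000000 + 250000) / 500000 = 100,
 * and the target is reported as synchronous at 10.0 MB/s.
 */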
  606. static void start_phase(struct mesh_state *ms)
  607. {
  608. int i, seq, nb;
  609. volatile struct mesh_regs __iomem *mr = ms->mesh;
  610. volatile struct dbdma_regs __iomem *md = ms->dma;
  611. struct scsi_cmnd *cmd = ms->current_req;
  612. struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
  613. dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
  614. MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
  615. out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  616. seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
  617. switch (ms->msgphase) {
  618. case msg_none:
  619. break;
  620. case msg_in:
  621. out_8(&mr->count_hi, 0);
  622. out_8(&mr->count_lo, 1);
  623. out_8(&mr->sequence, SEQ_MSGIN + seq);
  624. ms->n_msgin = 0;
  625. return;
  626. case msg_out:
  627. /*
  628. * To make sure ATN drops before we assert ACK for
  629. * the last byte of the message, we have to do the
  630. * last byte specially.
  631. */
  632. if (ms->n_msgout <= 0) {
  633. printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n",
  634. ms->n_msgout);
  635. mesh_dump_regs(ms);
  636. ms->msgphase = msg_none;
  637. break;
  638. }
  639. if (ALLOW_DEBUG(ms->conn_tgt)) {
  640. printk(KERN_DEBUG "mesh: sending %d msg bytes:",
  641. ms->n_msgout);
  642. for (i = 0; i < ms->n_msgout; ++i)
  643. printk(" %x", ms->msgout[i]);
  644. printk("\n");
  645. }
  646. dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
  647. ms->msgout[1], ms->msgout[2]));
  648. out_8(&mr->count_hi, 0);
  649. out_8(&mr->sequence, SEQ_FLUSHFIFO);
  650. mesh_flush_io(mr);
  651. udelay(1);
  652. /*
  653. * If ATN is not already asserted, we assert it, then
  654. * issue a SEQ_MSGOUT to get the mesh to drop ACK.
  655. */
  656. if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
  657. dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
  658. out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
  659. mesh_flush_io(mr);
  660. udelay(1);
  661. out_8(&mr->count_lo, 1);
  662. out_8(&mr->sequence, SEQ_MSGOUT + seq);
  663. out_8(&mr->bus_status0, 0); /* release explicit ATN */
  664. dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0);
  665. }
  666. if (ms->n_msgout == 1) {
  667. /*
  668. * We can't issue the SEQ_MSGOUT without ATN
  669. * until the target has asserted REQ. The logic
  670. * in cmd_complete handles both situations:
  671. * REQ already asserted or not.
  672. */
  673. cmd_complete(ms);
  674. } else {
  675. out_8(&mr->count_lo, ms->n_msgout - 1);
  676. out_8(&mr->sequence, SEQ_MSGOUT + seq);
  677. for (i = 0; i < ms->n_msgout - 1; ++i)
  678. out_8(&mr->fifo, ms->msgout[i]);
  679. }
  680. return;
  681. default:
  682. printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n",
  683. ms->msgphase);
  684. }
  685. switch (ms->phase) {
  686. case selecting:
  687. out_8(&mr->dest_id, ms->conn_tgt);
  688. out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN);
  689. break;
  690. case commanding:
  691. out_8(&mr->sync_params, tp->sync_params);
  692. out_8(&mr->count_hi, 0);
  693. if (cmd) {
  694. out_8(&mr->count_lo, cmd->cmd_len);
  695. out_8(&mr->sequence, SEQ_COMMAND + seq);
  696. for (i = 0; i < cmd->cmd_len; ++i)
  697. out_8(&mr->fifo, cmd->cmnd[i]);
  698. } else {
  699. out_8(&mr->count_lo, 6);
  700. out_8(&mr->sequence, SEQ_COMMAND + seq);
  701. for (i = 0; i < 6; ++i)
  702. out_8(&mr->fifo, 0);
  703. }
  704. break;
  705. case dataing:
  706. /* transfer data, if any */
  707. if (!ms->dma_started) {
  708. set_dma_cmds(ms, cmd);
  709. out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
  710. out_le32(&md->control, (RUN << 16) | RUN);
  711. ms->dma_started = 1;
  712. }
  713. nb = ms->dma_count;
  714. if (nb > 0xfff0)
  715. nb = 0xfff0;
  716. ms->dma_count -= nb;
  717. ms->data_ptr += nb;
  718. out_8(&mr->count_lo, nb);
  719. out_8(&mr->count_hi, nb >> 8);
  720. out_8(&mr->sequence, (tp->data_goes_out?
  721. SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq);
  722. break;
  723. case statusing:
  724. out_8(&mr->count_hi, 0);
  725. out_8(&mr->count_lo, 1);
  726. out_8(&mr->sequence, SEQ_STATUS + seq);
  727. break;
  728. case busfreeing:
  729. case disconnecting:
  730. out_8(&mr->sequence, SEQ_ENBRESEL);
  731. mesh_flush_io(mr);
  732. udelay(1);
  733. dlog(ms, "enbresel intr/exc/err/fc=%.8x",
  734. MKWORD(mr->interrupt, mr->exception, mr->error,
  735. mr->fifo_count));
  736. out_8(&mr->sequence, SEQ_BUSFREE);
  737. break;
  738. default:
  739. printk(KERN_ERR "mesh: start_phase called with phase=%d\n",
  740. ms->phase);
  741. dumpslog(ms);
  742. }
  743. }
  744. static inline void get_msgin(struct mesh_state *ms)
  745. {
  746. volatile struct mesh_regs __iomem *mr = ms->mesh;
  747. int i, n;
  748. n = mr->fifo_count;
  749. if (n != 0) {
  750. i = ms->n_msgin;
  751. ms->n_msgin = i + n;
  752. for (; n > 0; --n)
  753. ms->msgin[i++] = in_8(&mr->fifo);
  754. }
  755. }
  756. static inline int msgin_length(struct mesh_state *ms)
  757. {
  758. int b, n;
  759. n = 1;
  760. if (ms->n_msgin > 0) {
  761. b = ms->msgin[0];
  762. if (b == 1) {
  763. /* extended message */
  764. n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
  765. } else if (0x20 <= b && b <= 0x2f) {
  766. /* 2-byte message */
  767. n = 2;
  768. }
  769. }
  770. return n;
  771. }
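/*
 * Example: for an incoming SDTR (01 03 01 <period> <offset>) this
 * returns 2 after only the EXTENDED_MESSAGE byte has arrived, then
 * msgin[1] + 2 = 5 once the length byte (3) is in, so cmd_complete()
 * keeps requesting message-in bytes until all five are present.
 */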
  772. static void reselected(struct mesh_state *ms)
  773. {
  774. volatile struct mesh_regs __iomem *mr = ms->mesh;
  775. struct scsi_cmnd *cmd;
  776. struct mesh_target *tp;
  777. int b, t, prev;
  778. switch (ms->phase) {
  779. case idle:
  780. break;
  781. case arbitrating:
  782. if ((cmd = ms->current_req) != NULL) {
  783. /* put the command back on the queue */
  784. cmd->host_scribble = (void *) ms->request_q;
  785. if (ms->request_q == NULL)
  786. ms->request_qtail = cmd;
  787. ms->request_q = cmd;
  788. tp = &ms->tgts[cmd->device->id];
  789. tp->current_req = NULL;
  790. }
  791. break;
  792. case busfreeing:
  793. ms->phase = reselecting;
  794. mesh_done(ms, 0);
  795. break;
  796. case disconnecting:
  797. break;
  798. default:
  799. printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n",
  800. ms->msgphase, ms->phase, ms->conn_tgt);
  801. dumplog(ms, ms->conn_tgt);
  802. dumpslog(ms);
  803. }
  804. if (ms->dma_started) {
  805. printk(KERN_ERR "mesh: reselected with DMA started !\n");
  806. halt_dma(ms);
  807. }
  808. ms->current_req = NULL;
  809. ms->phase = dataing;
  810. ms->msgphase = msg_in;
  811. ms->n_msgout = 0;
  812. ms->last_n_msgout = 0;
  813. prev = ms->conn_tgt;
  814. /*
  815. * We seem to get abortive reselections sometimes.
  816. */
  817. while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) {
  818. static int mesh_aborted_resels;
  819. mesh_aborted_resels++;
  820. out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  821. mesh_flush_io(mr);
  822. udelay(1);
  823. out_8(&mr->sequence, SEQ_ENBRESEL);
  824. mesh_flush_io(mr);
  825. udelay(5);
  826. dlog(ms, "extra resel err/exc/fc = %.6x",
  827. MKWORD(0, mr->error, mr->exception, mr->fifo_count));
  828. }
  829. out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  830. mesh_flush_io(mr);
  831. udelay(1);
  832. out_8(&mr->sequence, SEQ_ENBRESEL);
  833. mesh_flush_io(mr);
  834. udelay(1);
  835. out_8(&mr->sync_params, ASYNC_PARAMS);
  836. /*
  837. * Find out who reselected us.
  838. */
  839. if (in_8(&mr->fifo_count) == 0) {
  840. printk(KERN_ERR "mesh: reselection but nothing in fifo?\n");
  841. ms->conn_tgt = ms->host->this_id;
  842. goto bogus;
  843. }
  844. /* get the last byte in the fifo */
  845. do {
  846. b = in_8(&mr->fifo);
  847. dlog(ms, "reseldata %x", b);
  848. } while (in_8(&mr->fifo_count));
  849. for (t = 0; t < 8; ++t)
  850. if ((b & (1 << t)) != 0 && t != ms->host->this_id)
  851. break;
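/*
 * A valid reselection byte has exactly two bits set: our own ID and the
 * reselecting target's ID.  For example (hypothetical IDs), with
 * this_id == 7 and target 2 reselecting us, b would be 0x84.
 */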
  852. if (b != (1 << t) + (1 << ms->host->this_id)) {
  853. printk(KERN_ERR "mesh: bad reselection data %x\n", b);
  854. ms->conn_tgt = ms->host->this_id;
  855. goto bogus;
  856. }
  857. /*
  858. * Set up to continue with that target's transfer.
  859. */
  860. ms->conn_tgt = t;
  861. tp = &ms->tgts[t];
  862. out_8(&mr->sync_params, tp->sync_params);
  863. if (ALLOW_DEBUG(t)) {
  864. printk(KERN_DEBUG "mesh: reselected by target %d\n", t);
  865. printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n",
  866. tp->saved_ptr, tp->data_goes_out, tp->current_req);
  867. }
  868. ms->current_req = tp->current_req;
  869. if (tp->current_req == NULL) {
  870. printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t);
  871. goto bogus;
  872. }
  873. ms->data_ptr = tp->saved_ptr;
  874. dlog(ms, "resel prev tgt=%d", prev);
  875. dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
  876. start_phase(ms);
  877. return;
  878. bogus:
  879. dumplog(ms, ms->conn_tgt);
  880. dumpslog(ms);
  881. ms->data_ptr = 0;
  882. ms->aborting = 1;
  883. start_phase(ms);
  884. }
  885. static void do_abort(struct mesh_state *ms)
  886. {
  887. ms->msgout[0] = ABORT;
  888. ms->n_msgout = 1;
  889. ms->aborting = 1;
  890. ms->stat = DID_ABORT;
  891. dlog(ms, "abort", 0);
  892. }
  893. static void handle_reset(struct mesh_state *ms)
  894. {
  895. int tgt;
  896. struct mesh_target *tp;
  897. struct scsi_cmnd *cmd;
  898. volatile struct mesh_regs __iomem *mr = ms->mesh;
  899. for (tgt = 0; tgt < 8; ++tgt) {
  900. tp = &ms->tgts[tgt];
  901. if ((cmd = tp->current_req) != NULL) {
  902. set_host_byte(cmd, DID_RESET);
  903. tp->current_req = NULL;
  904. scsi_done(cmd);
  905. }
  906. ms->tgts[tgt].sdtr_state = do_sdtr;
  907. ms->tgts[tgt].sync_params = ASYNC_PARAMS;
  908. }
  909. ms->current_req = NULL;
  910. while ((cmd = ms->request_q) != NULL) {
  911. ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
  912. set_host_byte(cmd, DID_RESET);
  913. scsi_done(cmd);
  914. }
  915. ms->phase = idle;
  916. ms->msgphase = msg_none;
  917. out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  918. out_8(&mr->sequence, SEQ_FLUSHFIFO);
  919. mesh_flush_io(mr);
  920. udelay(1);
  921. out_8(&mr->sync_params, ASYNC_PARAMS);
  922. out_8(&mr->sequence, SEQ_ENBRESEL);
  923. }
  924. static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
  925. {
  926. unsigned long flags;
  927. struct mesh_state *ms = dev_id;
  928. struct Scsi_Host *dev = ms->host;
  929. spin_lock_irqsave(dev->host_lock, flags);
  930. mesh_interrupt(ms);
  931. spin_unlock_irqrestore(dev->host_lock, flags);
  932. return IRQ_HANDLED;
  933. }
  934. static void handle_error(struct mesh_state *ms)
  935. {
  936. int err, exc, count;
  937. volatile struct mesh_regs __iomem *mr = ms->mesh;
  938. err = in_8(&mr->error);
  939. exc = in_8(&mr->exception);
  940. out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
  941. dlog(ms, "error err/exc/fc/cl=%.8x",
  942. MKWORD(err, exc, mr->fifo_count, mr->count_lo));
  943. if (err & ERR_SCSIRESET) {
  944. /* SCSI bus was reset */
  945. printk(KERN_INFO "mesh: SCSI bus reset detected: "
  946. "waiting for end...");
  947. while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
  948. udelay(1);
  949. printk("done\n");
  950. if (ms->dma_started)
  951. halt_dma(ms);
  952. handle_reset(ms);
  953. /* request_q is empty, no point in mesh_start() */
  954. return;
  955. }
  956. if (err & ERR_UNEXPDISC) {
  957. /* Unexpected disconnect */
  958. if (exc & EXC_RESELECTED) {
  959. reselected(ms);
  960. return;
  961. }
  962. if (!ms->aborting) {
  963. printk(KERN_WARNING "mesh: target %d aborted\n",
  964. ms->conn_tgt);
  965. dumplog(ms, ms->conn_tgt);
  966. dumpslog(ms);
  967. }
  968. out_8(&mr->interrupt, INT_CMDDONE);
  969. ms->stat = DID_ABORT;
  970. mesh_done(ms, 1);
  971. return;
  972. }
  973. if (err & ERR_PARITY) {
  974. if (ms->msgphase == msg_in) {
  975. printk(KERN_ERR "mesh: msg parity error, target %d\n",
  976. ms->conn_tgt);
  977. ms->msgout[0] = MSG_PARITY_ERROR;
  978. ms->n_msgout = 1;
  979. ms->msgphase = msg_in_bad;
  980. cmd_complete(ms);
  981. return;
  982. }
  983. if (ms->stat == DID_OK) {
  984. printk(KERN_ERR "mesh: parity error, target %d\n",
  985. ms->conn_tgt);
  986. ms->stat = DID_PARITY;
  987. }
  988. count = (mr->count_hi << 8) + mr->count_lo;
  989. if (count == 0) {
  990. cmd_complete(ms);
  991. } else {
  992. /* reissue the data transfer command */
  993. out_8(&mr->sequence, mr->sequence);
  994. }
  995. return;
  996. }
  997. if (err & ERR_SEQERR) {
  998. if (exc & EXC_RESELECTED) {
  999. /* This can happen if we issue a command to
  1000. get the bus just after the target reselects us. */
  1001. static int mesh_resel_seqerr;
  1002. mesh_resel_seqerr++;
  1003. reselected(ms);
  1004. return;
  1005. }
  1006. if (exc == EXC_PHASEMM) {
  1007. static int mesh_phasemm_seqerr;
  1008. mesh_phasemm_seqerr++;
  1009. phase_mismatch(ms);
  1010. return;
  1011. }
  1012. printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n",
  1013. err, exc);
  1014. } else {
  1015. printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc);
  1016. }
  1017. mesh_dump_regs(ms);
  1018. dumplog(ms, ms->conn_tgt);
  1019. if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
  1020. /* try to do what the target wants */
  1021. do_abort(ms);
  1022. phase_mismatch(ms);
  1023. return;
  1024. }
  1025. ms->stat = DID_ERROR;
  1026. mesh_done(ms, 1);
  1027. }
  1028. static void handle_exception(struct mesh_state *ms)
  1029. {
  1030. int exc;
  1031. volatile struct mesh_regs __iomem *mr = ms->mesh;
  1032. exc = in_8(&mr->exception);
  1033. out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE);
  1034. if (exc & EXC_RESELECTED) {
  1035. static int mesh_resel_exc;
  1036. mesh_resel_exc++;
  1037. reselected(ms);
  1038. } else if (exc == EXC_ARBLOST) {
  1039. printk(KERN_DEBUG "mesh: lost arbitration\n");
  1040. ms->stat = DID_BUS_BUSY;
  1041. mesh_done(ms, 1);
  1042. } else if (exc == EXC_SELTO) {
  1043. /* selection timed out */
  1044. ms->stat = DID_BAD_TARGET;
  1045. mesh_done(ms, 1);
  1046. } else if (exc == EXC_PHASEMM) {
  1047. /* target wants to do something different:
  1048. find out what it wants and do it. */
  1049. phase_mismatch(ms);
  1050. } else {
  1051. printk(KERN_ERR "mesh: can't cope with exception %x\n", exc);
  1052. mesh_dump_regs(ms);
  1053. dumplog(ms, ms->conn_tgt);
  1054. do_abort(ms);
  1055. phase_mismatch(ms);
  1056. }
  1057. }
  1058. static void handle_msgin(struct mesh_state *ms)
  1059. {
  1060. int i, code;
  1061. struct scsi_cmnd *cmd = ms->current_req;
  1062. struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
  1063. if (ms->n_msgin == 0)
  1064. return;
  1065. code = ms->msgin[0];
  1066. if (ALLOW_DEBUG(ms->conn_tgt)) {
  1067. printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
  1068. for (i = 0; i < ms->n_msgin; ++i)
  1069. printk(" %x", ms->msgin[i]);
  1070. printk("\n");
  1071. }
  1072. dlog(ms, "msgin msg=%.8x",
  1073. MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
  1074. ms->expect_reply = 0;
  1075. ms->n_msgout = 0;
  1076. if (ms->n_msgin < msgin_length(ms))
  1077. goto reject;
  1078. if (cmd)
  1079. mesh_priv(cmd)->message = code;
  1080. switch (code) {
  1081. case COMMAND_COMPLETE:
  1082. break;
  1083. case EXTENDED_MESSAGE:
  1084. switch (ms->msgin[2]) {
  1085. case EXTENDED_MODIFY_DATA_POINTER:
  1086. ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
  1087. + (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
  1088. break;
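/*
 * The EXTENDED_MODIFY_DATA_POINTER argument bytes above (msgin[3..6])
 * form a big-endian offset; e.g. an argument of 00 00 02 00 advances
 * data_ptr by 0x200 (512) bytes.
 */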
  1089. case EXTENDED_SDTR:
  1090. if (tp->sdtr_state != sdtr_sent) {
  1091. /* reply with an SDTR */
  1092. add_sdtr_msg(ms);
  1093. /* limit period to at least the target's value,
  1094. offset to no more than the target's */
  1095. if (ms->msgout[3] < ms->msgin[3])
  1096. ms->msgout[3] = ms->msgin[3];
  1097. if (ms->msgout[4] > ms->msgin[4])
  1098. ms->msgout[4] = ms->msgin[4];
  1099. set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
  1100. ms->msgphase = msg_out;
  1101. } else {
  1102. set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
  1103. }
  1104. break;
  1105. default:
  1106. goto reject;
  1107. }
  1108. break;
  1109. case SAVE_POINTERS:
  1110. tp->saved_ptr = ms->data_ptr;
  1111. break;
  1112. case RESTORE_POINTERS:
  1113. ms->data_ptr = tp->saved_ptr;
  1114. break;
  1115. case DISCONNECT:
  1116. ms->phase = disconnecting;
  1117. break;
  1118. case ABORT:
  1119. break;
  1120. case MESSAGE_REJECT:
  1121. if (tp->sdtr_state == sdtr_sent)
  1122. set_sdtr(ms, 0, 0);
  1123. break;
  1124. case NOP:
  1125. break;
  1126. default:
  1127. if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) {
  1128. if (cmd == NULL) {
  1129. do_abort(ms);
  1130. ms->msgphase = msg_out;
  1131. } else if (code != cmd->device->lun + IDENTIFY_BASE) {
  1132. printk(KERN_WARNING "mesh: lun mismatch "
  1133. "(%d != %llu) on reselection from "
  1134. "target %d\n", code - IDENTIFY_BASE,
  1135. cmd->device->lun, ms->conn_tgt);
  1136. }
  1137. break;
  1138. }
  1139. goto reject;
  1140. }
  1141. return;
  1142. reject:
  1143. printk(KERN_WARNING "mesh: rejecting message from target %d:",
  1144. ms->conn_tgt);
  1145. for (i = 0; i < ms->n_msgin; ++i)
  1146. printk(" %x", ms->msgin[i]);
  1147. printk("\n");
  1148. ms->msgout[0] = MESSAGE_REJECT;
  1149. ms->n_msgout = 1;
  1150. ms->msgphase = msg_out;
  1151. }
  1152. /*
  1153. * Set up DMA commands for transferring data.
  1154. */
  1155. static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
  1156. {
  1157. int i, dma_cmd, total, off, dtot;
  1158. struct scatterlist *scl;
  1159. struct dbdma_cmd *dcmds;
  1160. dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
  1161. OUTPUT_MORE: INPUT_MORE;
  1162. dcmds = ms->dma_cmds;
  1163. dtot = 0;
  1164. if (cmd) {
  1165. int nseg;
  1166. mesh_priv(cmd)->this_residual = scsi_bufflen(cmd);
  1167. nseg = scsi_dma_map(cmd);
  1168. BUG_ON(nseg < 0);
  1169. if (nseg) {
  1170. total = 0;
  1171. off = ms->data_ptr;
  1172. scsi_for_each_sg(cmd, scl, nseg, i) {
  1173. u32 dma_addr = sg_dma_address(scl);
  1174. u32 dma_len = sg_dma_len(scl);
  1175. total += scl->length;
  1176. if (off >= dma_len) {
  1177. off -= dma_len;
  1178. continue;
  1179. }
  1180. if (dma_len > 0xffff)
  1181. panic("mesh: scatterlist element >= 64k");
  1182. dcmds->req_count = cpu_to_le16(dma_len - off);
  1183. dcmds->command = cpu_to_le16(dma_cmd);
  1184. dcmds->phy_addr = cpu_to_le32(dma_addr + off);
  1185. dcmds->xfer_status = 0;
  1186. ++dcmds;
  1187. dtot += dma_len - off;
  1188. off = 0;
  1189. }
  1190. }
  1191. }
  1192. if (dtot == 0) {
  1193. /* Either the target has overrun our buffer,
  1194. or the caller didn't provide a buffer. */
  1195. static char mesh_extra_buf[64];
  1196. dtot = sizeof(mesh_extra_buf);
  1197. dcmds->req_count = cpu_to_le16(dtot);
  1198. dcmds->phy_addr = cpu_to_le32(virt_to_phys(mesh_extra_buf));
  1199. dcmds->xfer_status = 0;
  1200. ++dcmds;
  1201. }
  1202. dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
  1203. dcmds[-1].command = cpu_to_le16(dma_cmd);
  1204. memset(dcmds, 0, sizeof(*dcmds));
  1205. dcmds->command = cpu_to_le16(DBDMA_STOP);
  1206. ms->dma_count = dtot;
  1207. }
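/*
 * Sketch of the result (assuming, say, a two-segment scatterlist on a
 * data-in transfer): the loop above emits one INPUT_MORE descriptor per
 * segment, the trailing "dma_cmd += OUTPUT_LAST - OUTPUT_MORE" rewrites
 * the final descriptor to its INPUT_LAST variant, and the chain is
 * terminated with a DBDMA_STOP descriptor.
 */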
  1208. static void halt_dma(struct mesh_state *ms)
  1209. {
  1210. volatile struct dbdma_regs __iomem *md = ms->dma;
  1211. volatile struct mesh_regs __iomem *mr = ms->mesh;
  1212. struct scsi_cmnd *cmd = ms->current_req;
  1213. int t, nb;
  1214. if (!ms->tgts[ms->conn_tgt].data_goes_out) {
  1215. /* wait a little while until the fifo drains */
  1216. t = 50;
  1217. while (t > 0 && in_8(&mr->fifo_count) != 0
  1218. && (in_le32(&md->status) & ACTIVE) != 0) {
  1219. --t;
  1220. udelay(1);
  1221. }
  1222. }
  1223. out_le32(&md->control, RUN << 16); /* turn off RUN bit */
  1224. nb = (mr->count_hi << 8) + mr->count_lo;
  1225. dlog(ms, "halt_dma fc/count=%.6x",
  1226. MKWORD(0, mr->fifo_count, 0, nb));
  1227. if (ms->tgts[ms->conn_tgt].data_goes_out)
  1228. nb += mr->fifo_count;
  1229. /* nb is the number of bytes not yet transferred
  1230. to/from the target. */
  1231. ms->data_ptr -= nb;
  1232. dlog(ms, "data_ptr %x", ms->data_ptr);
  1233. if (ms->data_ptr < 0) {
  1234. printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
  1235. ms->data_ptr, nb, ms);
  1236. ms->data_ptr = 0;
  1237. #ifdef MESH_DBG
  1238. dumplog(ms, ms->conn_tgt);
  1239. dumpslog(ms);
  1240. #endif /* MESH_DBG */
  1241. } else if (cmd && scsi_bufflen(cmd) &&
  1242. ms->data_ptr > scsi_bufflen(cmd)) {
  1243. printk(KERN_DEBUG "mesh: target %d overrun, "
  1244. "data_ptr=%x total=%x goes_out=%d\n",
  1245. ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
  1246. ms->tgts[ms->conn_tgt].data_goes_out);
  1247. }
  1248. if (cmd)
  1249. scsi_dma_unmap(cmd);
  1250. ms->dma_started = 0;
  1251. }
  1252. static void phase_mismatch(struct mesh_state *ms)
  1253. {
  1254. volatile struct mesh_regs __iomem *mr = ms->mesh;
  1255. int phase;
  1256. dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
  1257. MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count));
  1258. phase = in_8(&mr->bus_status0) & BS0_PHASE;
  1259. if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
  1260. /* output the last byte of the message, without ATN */
  1261. out_8(&mr->count_lo, 1);
  1262. out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
  1263. mesh_flush_io(mr);
  1264. udelay(1);
  1265. out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
  1266. ms->msgphase = msg_out_last;
  1267. return;
  1268. }
  1269. if (ms->msgphase == msg_in) {
  1270. get_msgin(ms);
  1271. if (ms->n_msgin)
  1272. handle_msgin(ms);
  1273. }
  1274. if (ms->dma_started)
  1275. halt_dma(ms);
  1276. if (mr->fifo_count) {
  1277. out_8(&mr->sequence, SEQ_FLUSHFIFO);
  1278. mesh_flush_io(mr);
  1279. udelay(1);
  1280. }
  1281. ms->msgphase = msg_none;
  1282. switch (phase) {
  1283. case BP_DATAIN:
  1284. ms->tgts[ms->conn_tgt].data_goes_out = 0;
  1285. ms->phase = dataing;
  1286. break;
  1287. case BP_DATAOUT:
  1288. ms->tgts[ms->conn_tgt].data_goes_out = 1;
  1289. ms->phase = dataing;
  1290. break;
  1291. case BP_COMMAND:
  1292. ms->phase = commanding;
  1293. break;
  1294. case BP_STATUS:
  1295. ms->phase = statusing;
  1296. break;
  1297. case BP_MSGIN:
  1298. ms->msgphase = msg_in;
  1299. ms->n_msgin = 0;
  1300. break;
  1301. case BP_MSGOUT:
  1302. ms->msgphase = msg_out;
  1303. if (ms->n_msgout == 0) {
  1304. if (ms->aborting) {
  1305. do_abort(ms);
  1306. } else {
  1307. if (ms->last_n_msgout == 0) {
  1308. printk(KERN_DEBUG
  1309. "mesh: no msg to repeat\n");
  1310. ms->msgout[0] = NOP;
  1311. ms->last_n_msgout = 1;
  1312. }
  1313. ms->n_msgout = ms->last_n_msgout;
  1314. }
  1315. }
  1316. break;
  1317. default:
  1318. printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase);
  1319. ms->stat = DID_ERROR;
  1320. mesh_done(ms, 1);
  1321. return;
  1322. }
  1323. start_phase(ms);
  1324. }
  1325. static void cmd_complete(struct mesh_state *ms)
  1326. {
  1327. volatile struct mesh_regs __iomem *mr = ms->mesh;
  1328. struct scsi_cmnd *cmd = ms->current_req;
  1329. struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
  1330. int seq, n, t;
  1331. dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
  1332. seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
  1333. switch (ms->msgphase) {
  1334. case msg_out_xxx:
  1335. /* huh? we expected a phase mismatch */
  1336. ms->n_msgin = 0;
  1337. ms->msgphase = msg_in;
  1338. fallthrough;
  1339. case msg_in:
  1340. /* should have some message bytes in fifo */
  1341. get_msgin(ms);
  1342. n = msgin_length(ms);
  1343. if (ms->n_msgin < n) {
  1344. out_8(&mr->count_lo, n - ms->n_msgin);
  1345. out_8(&mr->sequence, SEQ_MSGIN + seq);
  1346. } else {
  1347. ms->msgphase = msg_none;
  1348. handle_msgin(ms);
  1349. start_phase(ms);
  1350. }
  1351. break;
  1352. case msg_in_bad:
  1353. out_8(&mr->sequence, SEQ_FLUSHFIFO);
  1354. mesh_flush_io(mr);
  1355. udelay(1);
  1356. out_8(&mr->count_lo, 1);
  1357. out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg);
  1358. break;
  1359. case msg_out:
  1360. /*
  1361. * To get the right timing on ATN wrt ACK, we have
  1362. * to get the MESH to drop ACK, wait until REQ gets
  1363. * asserted, then drop ATN. To do this we first
  1364. * issue a SEQ_MSGOUT with ATN and wait for REQ,
  1365. * then change the command to a SEQ_MSGOUT w/o ATN.
  1366. * If we don't see REQ in a reasonable time, we
  1367. * change the command to SEQ_MSGIN with ATN,
  1368. * wait for the phase mismatch interrupt, then
  1369. * issue the SEQ_MSGOUT without ATN.
  1370. */
  1371. out_8(&mr->count_lo, 1);
  1372. out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN);
  1373. t = 30; /* wait up to 30us */
  1374. while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
  1375. udelay(1);
  1376. dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
  1377. MKWORD(mr->error, mr->exception,
  1378. mr->fifo_count, mr->count_lo));
  1379. if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) {
  1380. /* whoops, target didn't do what we expected */
  1381. ms->last_n_msgout = ms->n_msgout;
  1382. ms->n_msgout = 0;
  1383. if (in_8(&mr->interrupt) & INT_ERROR) {
  1384. printk(KERN_ERR "mesh: error %x in msg_out\n",
  1385. in_8(&mr->error));
  1386. handle_error(ms);
  1387. return;
  1388. }
  1389. if (in_8(&mr->exception) != EXC_PHASEMM)
  1390. printk(KERN_ERR "mesh: exc %x in msg_out\n",
  1391. in_8(&mr->exception));
  1392. else
  1393. printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n",
  1394. in_8(&mr->bus_status0));
  1395. handle_exception(ms);
  1396. return;
  1397. }
  1398. if (in_8(&mr->bus_status0) & BS0_REQ) {
  1399. out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
  1400. mesh_flush_io(mr);
  1401. udelay(1);
  1402. out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
  1403. ms->msgphase = msg_out_last;
  1404. } else {
  1405. out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN);
  1406. ms->msgphase = msg_out_xxx;
  1407. }
  1408. break;
  1409. case msg_out_last:
  1410. ms->last_n_msgout = ms->n_msgout;
  1411. ms->n_msgout = 0;
  1412. ms->msgphase = ms->expect_reply? msg_in: msg_none;
  1413. start_phase(ms);
  1414. break;
  1415. case msg_none:
  1416. switch (ms->phase) {
  1417. case idle:
  1418. printk(KERN_ERR "mesh: interrupt in idle phase?\n");
  1419. dumpslog(ms);
  1420. return;
  1421. case selecting:
  1422. dlog(ms, "Selecting phase at command completion",0);
  1423. ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
  1424. (cmd? cmd->device->lun: 0));
  1425. ms->n_msgout = 1;
  1426. ms->expect_reply = 0;
  1427. if (ms->aborting) {
  1428. ms->msgout[0] = ABORT;
  1429. ms->n_msgout++;
  1430. } else if (tp->sdtr_state == do_sdtr) {
  1431. /* add SDTR message */
  1432. add_sdtr_msg(ms);
  1433. ms->expect_reply = 1;
  1434. tp->sdtr_state = sdtr_sent;
  1435. }
  1436. ms->msgphase = msg_out;
  1437. /*
  1438. * We need to wait for REQ before dropping ATN.
  1439. * We wait for at most 30us, then fall back to
  1440. * a scheme where we issue a SEQ_COMMAND with ATN,
  1441. * which will give us a phase mismatch interrupt
  1442. * when REQ does come, and then we send the message.
  1443. */
  1444. t = 230; /* wait up to 230us */
  1445. while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) {
  1446. if (--t < 0) {
  1447. dlog(ms, "impatient for req", ms->n_msgout);
  1448. ms->msgphase = msg_none;
  1449. break;
  1450. }
  1451. udelay(1);
  1452. }
  1453. break;
                case dataing:
                        if (ms->dma_count != 0) {
                                start_phase(ms);
                                return;
                        }
                        /*
                         * We can get a phase mismatch here if the target
                         * changes to the status phase, even though we have
                         * had a command complete interrupt.  Then, if we
                         * issue the SEQ_STATUS command, we'll get a sequence
                         * error interrupt.  Which isn't so bad except that
                         * occasionally the mesh actually executes the
                         * SEQ_STATUS *as well as* giving us the sequence
                         * error and phase mismatch exception.
                         */
                        out_8(&mr->sequence, 0);
                        out_8(&mr->interrupt,
                              INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
                        halt_dma(ms);
                        break;
                case statusing:
                        if (cmd) {
                                struct mesh_cmd_priv *mcmd = mesh_priv(cmd);

                                mcmd->status = mr->fifo;
                                if (DEBUG_TARGET(cmd))
                                        printk(KERN_DEBUG "mesh: status is %x\n",
                                               mcmd->status);
                        }
                        ms->msgphase = msg_in;
                        break;
                case busfreeing:
                        mesh_done(ms, 1);
                        return;
                case disconnecting:
                        ms->current_req = NULL;
                        ms->phase = idle;
                        mesh_start(ms);
                        return;
                default:
                        break;
                }
                ++ms->phase;
                start_phase(ms);
                break;
        }
}

/*
 * Called by midlayer with host locked to queue a new
 * request
 */
static int mesh_queue_lck(struct scsi_cmnd *cmd)
{
        struct mesh_state *ms;

        cmd->host_scribble = NULL;
        ms = (struct mesh_state *) cmd->device->host->hostdata;
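        /*
         * Requests are kept on a simple singly-linked list threaded
         * through host_scribble; new commands are appended at the tail.
         */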
        if (ms->request_q == NULL)
                ms->request_q = cmd;
        else
                ms->request_qtail->host_scribble = (void *) cmd;
        ms->request_qtail = cmd;

        if (ms->phase == idle)
                mesh_start(ms);

        return 0;
}

static DEF_SCSI_QCMD(mesh_queue)

/*
 * Called to handle interrupts, either by the interrupt
 * handler (do_mesh_interrupt) or by other functions in
 * exceptional circumstances
 */
static void mesh_interrupt(struct mesh_state *ms)
{
        volatile struct mesh_regs __iomem *mr = ms->mesh;
        int intr;

#if 0
        if (ALLOW_DEBUG(ms->conn_tgt))
                printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x "
                       "phase=%d msgphase=%d\n", mr->bus_status0,
                       mr->interrupt, mr->exception, mr->error,
                       ms->phase, ms->msgphase);
#endif
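        /*
         * Keep servicing until the interrupt register reads back zero.
         * Errors take priority over exceptions, which take priority
         * over plain command completion.
         */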
        while ((intr = in_8(&mr->interrupt)) != 0) {
                dlog(ms, "interrupt intr/err/exc/seq=%.8x",
                     MKWORD(intr, mr->error, mr->exception, mr->sequence));
                if (intr & INT_ERROR) {
                        handle_error(ms);
                } else if (intr & INT_EXCEPTION) {
                        handle_exception(ms);
                } else if (intr & INT_CMDDONE) {
                        out_8(&mr->interrupt, INT_CMDDONE);
                        cmd_complete(ms);
                }
        }
}

/* Todo: here we can at least try to remove the command from the
 * queue if it isn't connected yet, and for a pending command, assert
 * ATN until the bus gets freed.
 */
static int mesh_abort(struct scsi_cmnd *cmd)
{
        struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;

        printk(KERN_DEBUG "mesh_abort(%p)\n", cmd);
        mesh_dump_regs(ms);
        dumplog(ms, cmd->device->id);
        dumpslog(ms);
        return FAILED;
}

/*
 * Called by the midlayer with the lock held to reset the
 * SCSI host and bus.
 * The midlayer will wait for devices to come back, we don't need
 * to do that ourselves
 */
static int mesh_host_reset(struct scsi_cmnd *cmd)
{
        struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
        volatile struct mesh_regs __iomem *mr = ms->mesh;
        volatile struct dbdma_regs __iomem *md = ms->dma;
        unsigned long flags;

        printk(KERN_DEBUG "mesh_host_reset\n");

        spin_lock_irqsave(ms->host->host_lock, flags);

        if (ms->dma_started)
                halt_dma(ms);

        /* Reset the controller & dbdma channel */
        out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);   /* stop dma */
        out_8(&mr->exception, 0xff);    /* clear all exception bits */
        out_8(&mr->error, 0xff);        /* clear all error bits */
        out_8(&mr->sequence, SEQ_RESETMESH);
        mesh_flush_io(mr);
        udelay(1);
        out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
        out_8(&mr->source_id, ms->host->this_id);
        out_8(&mr->sel_timeout, 25);    /* 250ms */
        out_8(&mr->sync_params, ASYNC_PARAMS);

        /* Reset the bus */
        out_8(&mr->bus_status1, BS1_RST);       /* assert RST */
        mesh_flush_io(mr);
        udelay(30);                     /* leave it on for >= 25us */
        out_8(&mr->bus_status1, 0);     /* negate RST */

        /* Complete pending commands */
        handle_reset(ms);

        spin_unlock_irqrestore(ms->host->host_lock, flags);
        return SUCCESS;
}
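
/*
 * Power the MESH cell on or off through the platform feature call.
 * The sleeps are there to give the hardware a moment to settle
 * after switching.
 */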
static void set_mesh_power(struct mesh_state *ms, int state)
{
        if (!machine_is(powermac))
                return;
        if (state) {
                pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
                msleep(200);
        } else {
                pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
                msleep(10);
        }
}

#ifdef CONFIG_PM
static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
        struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
        unsigned long flags;

        switch (mesg.event) {
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
        case PM_EVENT_FREEZE:
                break;
        default:
                return 0;
        }
        if (ms->phase == sleeping)
                return 0;

        scsi_block_requests(ms->host);
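        /* Wait until the driver goes idle, dropping the lock while we sleep */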
        spin_lock_irqsave(ms->host->host_lock, flags);
        while (ms->phase != idle) {
                spin_unlock_irqrestore(ms->host->host_lock, flags);
                msleep(10);
                spin_lock_irqsave(ms->host->host_lock, flags);
        }
        ms->phase = sleeping;
        spin_unlock_irqrestore(ms->host->host_lock, flags);
        disable_irq(ms->meshintr);
        set_mesh_power(ms, 0);

        return 0;
}

static int mesh_resume(struct macio_dev *mdev)
{
        struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
        unsigned long flags;

        if (ms->phase != sleeping)
                return 0;

        set_mesh_power(ms, 1);
        mesh_init(ms);
        spin_lock_irqsave(ms->host->host_lock, flags);
        mesh_start(ms);
        spin_unlock_irqrestore(ms->host->host_lock, flags);
        enable_irq(ms->meshintr);
        scsi_unblock_requests(ms->host);

        return 0;
}

#endif /* CONFIG_PM */

/*
 * If we leave drives set for synchronous transfers (especially
 * CDROMs), and reboot to MacOS, it gets confused, poor thing.
 * So, on reboot we reset the SCSI bus.
 */
static int mesh_shutdown(struct macio_dev *mdev)
{
        struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
        volatile struct mesh_regs __iomem *mr;
        unsigned long flags;

        printk(KERN_INFO "resetting MESH scsi bus(es)\n");
        spin_lock_irqsave(ms->host->host_lock, flags);
        mr = ms->mesh;
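        /* Mask and clear all interrupts, then pulse RST on the bus */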
        out_8(&mr->intr_mask, 0);
        out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
        out_8(&mr->bus_status1, BS1_RST);
        mesh_flush_io(mr);
        udelay(30);
        out_8(&mr->bus_status1, 0);
        spin_unlock_irqrestore(ms->host->host_lock, flags);

        return 0;
}

static struct scsi_host_template mesh_template = {
        .proc_name                      = "mesh",
        .name                           = "MESH",
        .queuecommand                   = mesh_queue,
        .eh_abort_handler               = mesh_abort,
        .eh_host_reset_handler          = mesh_host_reset,
        .can_queue                      = 20,
        .this_id                        = 7,
        .sg_tablesize                   = SG_ALL,
        .cmd_per_lun                    = 2,
        .max_segment_size               = 65535,
        .cmd_size                       = sizeof(struct mesh_cmd_priv),
};

static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
        struct device_node *mesh = macio_get_of_node(mdev);
        struct pci_dev *pdev = macio_get_pci_dev(mdev);
        int tgt, minper;
        const int *cfp;
        struct mesh_state *ms;
        struct Scsi_Host *mesh_host;
        void *dma_cmd_space;
        dma_addr_t dma_cmd_bus;
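
        /* Decide whether to use active negation, based on the macio chip type */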
        switch (mdev->bus->chip->type) {
        case macio_heathrow:
        case macio_gatwick:
        case macio_paddington:
                use_active_neg = 0;
                break;
        default:
                use_active_neg = SEQ_ACTIVE_NEG;
        }

        if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
                printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
                       " (got %d,%d)\n", macio_resource_count(mdev),
                       macio_irq_count(mdev));
                return -ENODEV;
        }

        if (macio_request_resources(mdev, "mesh") != 0) {
                printk(KERN_ERR "mesh: unable to request memory resources\n");
                return -EBUSY;
        }

        mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state));
        if (mesh_host == NULL) {
                printk(KERN_ERR "mesh: couldn't register host\n");
                goto out_release;
        }

        mesh_host->base = macio_resource_start(mdev, 0);
        mesh_host->irq = macio_irq(mdev, 0);
        ms = (struct mesh_state *) mesh_host->hostdata;
        macio_set_drvdata(mdev, ms);
        ms->host = mesh_host;
        ms->mdev = mdev;
        ms->pdev = pdev;

        ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
        if (ms->mesh == NULL) {
                printk(KERN_ERR "mesh: can't map registers\n");
                goto out_free;
        }
        ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
        if (ms->dma == NULL) {
                printk(KERN_ERR "mesh: can't map registers\n");
                iounmap(ms->mesh);
                goto out_free;
        }

        ms->meshintr = macio_irq(mdev, 0);
        ms->dmaintr = macio_irq(mdev, 1);

        /* Space for dma command list: +1 for stop command,
         * +1 to allow for aligning.
         */
        ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);

        /* We use the PCI APIs for now until the generic one gets fixed
         * enough or until we get some macio-specific versions
         */
        dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
                                           ms->dma_cmd_size, &dma_cmd_bus,
                                           GFP_KERNEL);
        if (dma_cmd_space == NULL) {
                printk(KERN_ERR "mesh: can't allocate DMA table\n");
                goto out_unmap;
        }

        ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
        ms->dma_cmd_space = dma_cmd_space;
        ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
                - (unsigned long)dma_cmd_space;
        ms->current_req = NULL;
        for (tgt = 0; tgt < 8; ++tgt) {
                ms->tgts[tgt].sdtr_state = do_sdtr;
                ms->tgts[tgt].sync_params = ASYNC_PARAMS;
                ms->tgts[tgt].current_req = NULL;
        }
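
        /*
         * Use the clock-frequency property from the device tree if it is
         * present; otherwise assume a 50MHz clock.
         */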
        if ((cfp = of_get_property(mesh, "clock-frequency", NULL)))
                ms->clk_freq = *cfp;
        else {
                printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
                ms->clk_freq = 50000000;
        }

        /* The maximum sync rate is clock / 5; increase
         * mesh_sync_period if necessary.
         */
        minper = 1000000000 / (ms->clk_freq / 5); /* ns */
        if (mesh_sync_period < minper)
                mesh_sync_period = minper;

        /* Power up the chip */
        set_mesh_power(ms, 1);

        /* Set it up */
        mesh_init(ms);

        /* Request interrupt */
        if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
                printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
                goto out_shutdown;
        }

        /* Add scsi host & scan */
        if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
                goto out_release_irq;
        scsi_scan_host(mesh_host);

        return 0;

 out_release_irq:
        free_irq(ms->meshintr, ms);
 out_shutdown:
        /* Shut down & reset the bus on the error path too; otherwise MacOS
         * can be confused at reboot if the bus was already set to
         * synchronous mode.
         */
        mesh_shutdown(mdev);
        set_mesh_power(ms, 0);
        dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
                          ms->dma_cmd_space, ms->dma_cmd_bus);
 out_unmap:
        iounmap(ms->dma);
        iounmap(ms->mesh);
 out_free:
        scsi_host_put(mesh_host);
 out_release:
        macio_release_resources(mdev);
        return -ENODEV;
}

static int mesh_remove(struct macio_dev *mdev)
{
        struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
        struct Scsi_Host *mesh_host = ms->host;

        scsi_remove_host(mesh_host);

        free_irq(ms->meshintr, ms);

        /* Reset scsi bus */
        mesh_shutdown(mdev);

        /* Shut down chip & termination */
        set_mesh_power(ms, 0);

        /* Unmap registers & dma controller */
        iounmap(ms->mesh);
        iounmap(ms->dma);

        /* Free DMA commands memory */
        dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
                          ms->dma_cmd_space, ms->dma_cmd_bus);

        /* Release memory resources */
        macio_release_resources(mdev);

        scsi_host_put(mesh_host);

        return 0;
}

static struct of_device_id mesh_match[] =
{
        {
                .name           = "mesh",
        },
        {
                .type           = "scsi",
                .compatible     = "chrp,mesh0"
        },
        {},
};
MODULE_DEVICE_TABLE (of, mesh_match);

static struct macio_driver mesh_driver =
{
        .driver = {
                .name           = "mesh",
                .owner          = THIS_MODULE,
                .of_match_table = mesh_match,
        },
        .probe          = mesh_probe,
        .remove         = mesh_remove,
        .shutdown       = mesh_shutdown,
#ifdef CONFIG_PM
        .suspend        = mesh_suspend,
        .resume         = mesh_resume,
#endif
};

static int __init init_mesh(void)
{
        /* Calculate sync rate from module parameters */
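        /* e.g. sync_rate = 5 (MB/s) gives a 200ns period and an offset of 15 */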
        if (sync_rate > 10)
                sync_rate = 10;
        if (sync_rate > 0) {
                printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate);
                mesh_sync_period = 1000 / sync_rate;    /* ns */
                mesh_sync_offset = 15;
        } else
                printk(KERN_INFO "mesh: configured for asynchronous\n");

        return macio_register_driver(&mesh_driver);
}

static void __exit exit_mesh(void)
{
        macio_unregister_driver(&mesh_driver);
}

module_init(init_mesh);
module_exit(exit_mesh);