r852.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright © 2009 - Maxim Levitsky
  4. * driver for Ricoh xD readers
  5. */
  6. #define DRV_NAME "r852"
  7. #define pr_fmt(fmt) DRV_NAME ": " fmt
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/jiffies.h>
  11. #include <linux/workqueue.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/pci.h>
  14. #include <linux/pci_ids.h>
  15. #include <linux/delay.h>
  16. #include <linux/slab.h>
  17. #include <asm/byteorder.h>
  18. #include <linux/sched.h>
  19. #include "sm_common.h"
  20. #include "r852.h"
/* DMA on/off switch, read-only via sysfs; enabled by default. */
static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
/* Debug verbosity (0-2); writable at runtime by root. */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
  27. /* read register */
  28. static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
  29. {
  30. uint8_t reg = readb(dev->mmio + address);
  31. return reg;
  32. }
/* Write a byte-wide controller register at the given MMIO offset. */
static inline void r852_write_reg(struct r852_device *dev,
						int address, uint8_t value)
{
	writeb(value, dev->mmio + address);
}
/* Read a dword sized controller register at the given MMIO offset. */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
	/*
	 * NOTE(review): readl() already returns CPU-endian data, so the
	 * extra le32_to_cpu() is a no-op on little-endian hosts but would
	 * double-swap on big-endian ones — confirm before relying on this
	 * driver on a BE platform.
	 */
	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
	return reg;
}
/* Write a dword sized controller register at the given MMIO offset. */
static inline void r852_write_reg_dword(struct r852_device *dev,
							int address, uint32_t value)
{
	/*
	 * NOTE(review): writel() performs the CPU-to-LE conversion itself;
	 * the explicit cpu_to_le32() is redundant on LE and would
	 * double-swap on BE — same caveat as r852_read_reg_dword().
	 */
	writel(cpu_to_le32(value), dev->mmio + address);
}
  51. /* returns pointer to our private structure */
  52. static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
  53. {
  54. struct nand_chip *chip = mtd_to_nand(mtd);
  55. return nand_get_controller_data(chip);
  56. }
  57. /* check if controller supports dma */
  58. static void r852_dma_test(struct r852_device *dev)
  59. {
  60. dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
  61. (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
  62. if (!dev->dma_usable)
  63. message("Non dma capable device detected, dma disabled");
  64. if (!r852_enable_dma) {
  65. message("disabling dma on user request");
  66. dev->dma_usable = 0;
  67. }
  68. }
/*
 * Enable dma. Enables either first or second stage of the DMA,
 * Expects dev->dma_dir and dev->dma_state to be set.
 * Caller holds dev->irqlock (called from r852_do_dma and r852_irq).
 */
static void r852_dma_enable(struct r852_device *dev)
{
	/*
	 * NOTE(review): dma_reg receives a dword read but is only u8 wide —
	 * the upper 24 bits are silently dropped. Presumably only the low
	 * byte of R852_DMA_SETTINGS is meaningful; confirm against the HW.
	 */
	uint8_t dma_reg, dma_irq_reg;

	/* Set up dma settings */
	dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
	dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);

	/* dma_dir != 0 means device-to-memory (read) transfer */
	if (dev->dma_dir)
		dma_reg |= R852_DMA_READ;

	if (dev->dma_state == DMA_INTERNAL) {
		dma_reg |= R852_DMA_INTERNAL;
		/* Precaution to make sure HW doesn't write */
		/* to random kernel memory */
		r852_write_reg_dword(dev, R852_DMA_ADDR,
			cpu_to_le32(dev->phys_bounce_buffer));
	} else {
		dma_reg |= R852_DMA_MEMORY;
		r852_write_reg_dword(dev, R852_DMA_ADDR,
			cpu_to_le32(dev->phys_dma_addr));
	}

	/* Precaution: make sure write reached the device */
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);

	/* Set dma irq: enable internal-buffer, error and memory interrupts */
	dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
		dma_irq_reg |
		R852_DMA_IRQ_INTERNAL |
		R852_DMA_IRQ_ERROR |
		R852_DMA_IRQ_MEMORY);
}
/*
 * Disable dma, called from the interrupt handler, which specifies
 * success of the operation via 'error' argument.
 * Also called on the timeout path from r852_do_dma.
 */
static void r852_dma_done(struct r852_device *dev, int error)
{
	WARN_ON(dev->dma_stage == 0);

	/* ack any pending DMA interrupts, then shut the engine down */
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
			r852_read_reg_dword(dev, R852_DMA_IRQ_STA));

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);

	/* Precaution to make sure HW doesn't write to random kernel memory */
	r852_write_reg_dword(dev, R852_DMA_ADDR,
		cpu_to_le32(dev->phys_bounce_buffer));
	/* flush the address write to the device */
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	dev->dma_error = error;
	dev->dma_stage = 0;

	/* unmap the user buffer unless we were using the bounce buffer */
	if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
		dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
			R852_DMA_LEN,
			dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
  125. /*
  126. * Wait, till dma is done, which includes both phases of it
  127. */
  128. static int r852_dma_wait(struct r852_device *dev)
  129. {
  130. long timeout = wait_for_completion_timeout(&dev->dma_done,
  131. msecs_to_jiffies(1000));
  132. if (!timeout) {
  133. dbg("timeout waiting for DMA interrupt");
  134. return -ETIMEDOUT;
  135. }
  136. return 0;
  137. }
/*
 * Read/Write one page using dma. Only full pages (512 bytes) can be
 * transferred this way.
 */
static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
{
	int bounce = 0;
	unsigned long flags;
	int error;

	dev->dma_error = 0;

	/* Set dma direction */
	dev->dma_dir = do_read;
	dev->dma_stage = 1;
	reinit_completion(&dev->dma_done);

	dbg_verbose("doing dma %s ", do_read ? "read" : "write");

	/* Set initial dma state: for reading first fill on board buffer,
	   from device, for writes first fill the buffer from memory */
	dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;

	/* if incoming buffer is not page aligned, we should do bounce */
	if ((unsigned long)buf & (R852_DMA_LEN-1))
		bounce = 1;

	if (!bounce) {
		dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
			R852_DMA_LEN,
			do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		/* fall back to bouncing if the mapping failed */
		if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
			bounce = 1;
	}

	if (bounce) {
		dbg_verbose("dma: using bounce buffer");
		dev->phys_dma_addr = dev->phys_bounce_buffer;
		/* for writes, pre-load the bounce buffer from the caller */
		if (!do_read)
			memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
	}

	/* Enable DMA under the irq lock so the handler sees consistent state */
	spin_lock_irqsave(&dev->irqlock, flags);
	r852_dma_enable(dev);
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* Wait till complete */
	error = r852_dma_wait(dev);

	if (error) {
		/* timeout: tear down the engine and record the error */
		r852_dma_done(dev, error);
		return;
	}

	/* for bounced reads, copy the result back to the caller's buffer */
	if (do_read && bounce)
		memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
}
/*
 * Program data lines of the nand chip to send data to it
 */
static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	uint32_t reg;

	/* Don't allow any access to hardware if we suspect card removal */
	if (dev->card_unstable)
		return;

	/* Special case for whole sector write via DMA */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, (uint8_t *)buf, 0);
		return;
	}

	/* write DWORD chunks - faster */
	while (len >= 4) {
		/* assemble little-endian dword from the next 4 bytes */
		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
		r852_write_reg_dword(dev, R852_DATALINE, reg);
		buf += 4;
		len -= 4;
	}

	/* write rest, one byte at a time */
	while (len > 0) {
		r852_write_reg(dev, R852_DATALINE, *buf++);
		len--;
	}
}
/*
 * Read data lines of the nand chip to retrieve data
 */
static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	uint32_t reg;

	if (dev->card_unstable) {
		/* since we can't signal error here, at least, return
		   a predictable buffer */
		memset(buf, 0, len);
		return;
	}

	/* special case for whole sector read via DMA */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, buf, 1);
		return;
	}

	/* read in dword sized chunks */
	while (len >= 4) {
		/* split the little-endian dword into 4 output bytes */
		reg = r852_read_reg_dword(dev, R852_DATALINE);
		*buf++ = reg & 0xFF;
		*buf++ = (reg >> 8) & 0xFF;
		*buf++ = (reg >> 16) & 0xFF;
		*buf++ = (reg >> 24) & 0xFF;
		len -= 4;
	}

	/* read the rest, byte by byte */
	while (len--)
		*buf++ = r852_read_reg(dev, R852_DATALINE);
}
  243. /*
  244. * Read one byte from nand chip
  245. */
  246. static uint8_t r852_read_byte(struct nand_chip *chip)
  247. {
  248. struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
  249. /* Same problem as in r852_read_buf.... */
  250. if (dev->card_unstable)
  251. return 0;
  252. return r852_read_reg(dev, R852_DATALINE);
  253. }
/*
 * Control several chip lines & send commands
 */
static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return;

	if (ctrl & NAND_CTRL_CHANGE) {
		/* rebuild the control register from the requested lines */
		dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
				 R852_CTL_ON | R852_CTL_CARDENABLE);

		if (ctrl & NAND_ALE)
			dev->ctlreg |= R852_CTL_DATA;

		if (ctrl & NAND_CLE)
			dev->ctlreg |= R852_CTL_COMMAND;

		if (ctrl & NAND_NCE)
			dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
		else
			/* NOTE(review): clearing WRITE when the chip is
			   deselected — presumably to drop write access
			   once a command sequence ends; confirm intent */
			dev->ctlreg &= ~R852_CTL_WRITE;

		/* when write is started, enable write access */
		if (dat == NAND_CMD_ERASE1)
			dev->ctlreg |= R852_CTL_WRITE;

		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	/* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
	   to set write mode */
	if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
		dev->ctlreg |= R852_CTL_WRITE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	if (dat != NAND_CMD_NONE)
		r852_write_reg(dev, R852_DATALINE, dat);
}
/*
 * Wait till card is ready.
 * based on nand_wait, but returns errors on DMA error
 */
static int r852_wait(struct nand_chip *chip)
{
	struct r852_device *dev = nand_get_controller_data(chip);
	unsigned long timeout;
	u8 status;

	/* poll the ready line for up to 400 ms */
	timeout = jiffies + msecs_to_jiffies(400);
	while (time_before(jiffies, timeout))
		if (chip->legacy.dev_ready(chip))
			break;

	nand_status_op(chip, &status);

	/* Unfortunately, no way to send detailed error status...
	   so fold any recorded DMA error into the NAND status byte */
	if (dev->dma_error) {
		status |= NAND_STATUS_FAIL;
		dev->dma_error = 0;
	}
	return status;
}
  308. /*
  309. * Check if card is ready
  310. */
  311. static int r852_ready(struct nand_chip *chip)
  312. {
  313. struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
  314. return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
  315. }
/*
 * Set ECC engine mode
 */
static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return;

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		/* enable ecc generation/check */
		dev->ctlreg |= R852_CTL_ECC_ENABLE;

		/* flush ecc buffer: briefly switch the data line to the
		   ECC registers and discard one dword */
		r852_write_reg(dev, R852_CTL,
			dev->ctlreg | R852_CTL_ECC_ACCESS);

		r852_read_reg_dword(dev, R852_DATALINE);
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
		return;

	case NAND_ECC_READSYN:
		/* disable ecc generation */
		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}
}
/*
 * Calculate ECC, only used for writes
 */
static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
			      uint8_t *ecc_code)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	struct sm_oob *oob = (struct sm_oob *)ecc_code;
	uint32_t ecc1, ecc2;

	if (dev->card_unstable)
		return 0;

	/* stop ECC generation and switch data line to the ECC registers */
	dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);

	/* one 3-byte ECC per 256-byte half of the sector */
	ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
	ecc2 = r852_read_reg_dword(dev, R852_DATALINE);

	oob->ecc1[0] = (ecc1) & 0xFF;
	oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
	oob->ecc1[2] = (ecc1 >> 16) & 0xFF;

	oob->ecc2[0] = (ecc2) & 0xFF;
	oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
	oob->ecc2[2] = (ecc2 >> 16) & 0xFF;

	/* restore normal data-line access */
	r852_write_reg(dev, R852_CTL, dev->ctlreg);
	return 0;
}
/*
 * Correct the data using ECC, hw did almost everything for us.
 * Returns the number of corrected bit errors, or -EBADMSG/-EIO on
 * uncorrectable / DMA errors.
 */
static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
			    uint8_t *read_ecc, uint8_t *calc_ecc)
{
	uint32_t ecc_reg;
	uint8_t ecc_status, err_byte;
	int i, error = 0;

	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return 0;

	/* a DMA failure invalidates the whole read */
	if (dev->dma_error) {
		dev->dma_error = 0;
		return -EIO;
	}

	/* read the hardware's ECC verdict from the ECC register window */
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
	r852_write_reg(dev, R852_CTL, dev->ctlreg);

	/* one 16-bit status per 256-byte half: low byte = error offset,
	   high byte = status flags + error bit index */
	for (i = 0 ; i <= 1 ; i++) {
		ecc_status = (ecc_reg >> 8) & 0xFF;

		/* ecc uncorrectable error */
		if (ecc_status & R852_ECC_FAIL) {
			dbg("ecc: unrecoverable error, in half %d", i);
			error = -EBADMSG;
			goto exit;
		}

		/* correctable error: flip the single faulty bit in place */
		if (ecc_status & R852_ECC_CORRECTABLE) {
			err_byte = ecc_reg & 0xFF;
			dbg("ecc: recoverable error, "
				"in half %d, byte %d, bit %d", i,
				err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);

			dat[err_byte] ^=
				1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
			error++;
		}

		/* advance to second 256-byte half and its status word */
		dat += 256;
		ecc_reg >>= 16;
	}
exit:
	return error;
}
  408. /*
  409. * This is copy of nand_read_oob_std
  410. * nand_read_oob_syndrome assumes we can send column address - we can't
  411. */
  412. static int r852_read_oob(struct nand_chip *chip, int page)
  413. {
  414. struct mtd_info *mtd = nand_to_mtd(chip);
  415. return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
  416. }
/*
 * Start the nand engine
 */
static void r852_engine_enable(struct r852_device *dev)
{
	/* the order of the HW-enable vs reset writes depends on an
	   undocumented hardware state bit */
	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
	} else {
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
	}
	/* give the engine time to come out of reset */
	msleep(300);
	r852_write_reg(dev, R852_CTL, 0);
}
/*
 * Stop the nand engine
 */
static void r852_engine_disable(struct r852_device *dev)
{
	/* power the engine down and hold it in reset */
	r852_write_reg_dword(dev, R852_HW, 0);
	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}
  440. /*
  441. * Test if card is present
  442. */
  443. static void r852_card_update_present(struct r852_device *dev)
  444. {
  445. unsigned long flags;
  446. uint8_t reg;
  447. spin_lock_irqsave(&dev->irqlock, flags);
  448. reg = r852_read_reg(dev, R852_CARD_STA);
  449. dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
  450. spin_unlock_irqrestore(&dev->irqlock, flags);
  451. }
  452. /*
  453. * Update card detection IRQ state according to current card state
  454. * which is read in r852_card_update_present
  455. */
  456. static void r852_update_card_detect(struct r852_device *dev)
  457. {
  458. int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
  459. dev->card_unstable = 0;
  460. card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
  461. card_detect_reg |= R852_CARD_IRQ_GENABLE;
  462. card_detect_reg |= dev->card_detected ?
  463. R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
  464. r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
  465. }
  466. static ssize_t media_type_show(struct device *sys_dev,
  467. struct device_attribute *attr, char *buf)
  468. {
  469. struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
  470. struct r852_device *dev = r852_get_dev(mtd);
  471. char *data = dev->sm ? "smartmedia" : "xd";
  472. strcpy(buf, data);
  473. return strlen(data);
  474. }
  475. static DEVICE_ATTR_RO(media_type);
/* Detect properties of the card in the slot: type (SmartMedia vs xD)
   and write protection. Updates dev->sm and dev->readonly. */
static void r852_update_media_status(struct r852_device *dev)
{
	uint8_t reg;
	unsigned long flags;
	int readonly;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (!dev->card_detected) {
		message("card removed");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return ;
	}

	readonly  = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
	reg = r852_read_reg(dev, R852_DMA_CAP);
	/* SmartMedia detection: DMA-capable device with the SM bit set */
	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);

	message("detected %s %s card in slot",
		dev->sm ? "SmartMedia" : "xD",
		readonly ? "readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}
/*
 * Register the nand device
 * Called when the card is detected
 */
static int r852_register_nand_device(struct r852_device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(dev->chip);

	WARN_ON(dev->card_registered);

	mtd->dev.parent = &dev->pci_dev->dev;

	if (dev->readonly)
		dev->chip->options |= NAND_ROM;

	r852_engine_enable(dev);

	if (sm_register_device(mtd, dev->sm))
		goto error1;

	/* sysfs attribute is best-effort infrastructure for userspace,
	   but its failure still aborts registration */
	if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
		message("can't create media type sysfs attribute");
		goto error3;
	}

	dev->card_registered = 1;
	return 0;
error3:
	/* undo the sm_register_device() above */
	WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
	nand_cleanup(dev->chip);
error1:
	/* Force card redetect */
	dev->card_detected = 0;
	return -1;
}
/*
 * Unregister the card: remove the sysfs attribute, tear down the MTD
 * device and stop the nand engine. No-op if nothing is registered.
 */
static void r852_unregister_nand_device(struct r852_device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(dev->chip);

	if (!dev->card_registered)
		return;

	device_remove_file(&mtd->dev, &dev_attr_media_type);
	WARN_ON(mtd_device_unregister(mtd));
	nand_cleanup(dev->chip);
	r852_engine_disable(dev);
	dev->card_registered = 0;
}
/* Card state updater: delayed work scheduled from the IRQ handler
   after the card had ~100 ms to settle. Registers or unregisters the
   NAND device to match the (re)sampled present state. */
static void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	r852_card_update_present(dev);
	/* re-arm the proper insert/remove interrupt */
	r852_update_card_detect(dev);
	dev->card_unstable = 0;

	/* False alarm: present state already matches registration state */
	if (dev->card_detected == dev->card_registered)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	r852_update_card_detect(dev);
}
/* Ack + disable IRQ generation for both the card-detect and DMA
   interrupt sources. */
static void r852_disable_irqs(struct r852_device *dev)
{
	uint8_t reg;
	/* mask all card interrupts */
	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	/* mask all DMA interrupts */
	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
					reg & ~R852_DMA_IRQ_MASK);

	/* ack anything already pending */
	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}
/* Interrupt handler: services card insert/remove events and drives the
   two-stage DMA state machine (internal buffer <-> system memory). */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	uint8_t card_status, dma_status;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dev->irqlock);

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts if we wait for card
		   to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better that garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let the card state settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("received dma error IRQ");
			r852_dma_done(dev, -EIO);
			complete(&dev->dma_done);
			goto out;
		}

		/* received DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access: internal buffer filled/drained,
		   flip to the memory phase */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA: flip back to the internal phase */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done: both stages completed */
		if (dev->dma_stage == 3) {
			r852_dma_done(dev, 0);
			complete(&dev->dma_done);
		}
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock(&dev->irqlock);
	return ret;
}
  645. static int r852_attach_chip(struct nand_chip *chip)
  646. {
  647. if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
  648. return 0;
  649. chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
  650. chip->ecc.size = R852_DMA_LEN;
  651. chip->ecc.bytes = SM_OOB_SIZE;
  652. chip->ecc.strength = 2;
  653. chip->ecc.hwctl = r852_ecc_hwctl;
  654. chip->ecc.calculate = r852_ecc_calculate;
  655. chip->ecc.correct = r852_ecc_correct;
  656. /* TODO: hack */
  657. chip->ecc.read_oob = r852_read_oob;
  658. return 0;
  659. }
/* NAND controller operations: only the attach hook is needed. */
static const struct nand_controller_ops r852_ops = {
	.attach_chip = r852_attach_chip,
};
/*
 * PCI probe: set up PCI/DMA resources, allocate the nand chip and
 * private state, hook the interrupt and kick an initial card-detect.
 * The NAND device itself is registered later, on card insertion.
 * Resources are released in reverse order via the errorN labels.
 */
static int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	int error;
	struct nand_chip *chip;
	struct r852_device *dev;

	/* pci initialization */
	error = pci_enable_device(pci_dev);

	if (error)
		goto error1;

	pci_set_master(pci_dev);

	/* device only does 32-bit DMA */
	error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if (error)
		goto error2;

	error = pci_request_regions(pci_dev, DRV_NAME);
	if (error)
		goto error3;

	error = -ENOMEM;

	/* init nand chip, but register it only on card insert */
	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);

	if (!chip)
		goto error4;

	/* commands */
	chip->legacy.cmd_ctrl = r852_cmdctl;
	chip->legacy.waitfunc = r852_wait;
	chip->legacy.dev_ready = r852_ready;

	/* I/O */
	chip->legacy.read_byte = r852_read_byte;
	chip->legacy.read_buf = r852_read_buf;
	chip->legacy.write_buf = r852_write_buf;

	/* init our device structure */
	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);

	if (!dev)
		goto error5;

	nand_set_controller_data(chip, dev);
	dev->chip = chip;
	dev->pci_dev = pci_dev;
	pci_set_drvdata(pci_dev, dev);

	nand_controller_init(&dev->controller);
	dev->controller.ops = &r852_ops;
	chip->controller = &dev->controller;

	/* coherent bounce buffer for unaligned/unmappable DMA transfers */
	dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
		&dev->phys_bounce_buffer, GFP_KERNEL);

	if (!dev->bounce_buffer)
		goto error6;


	error = -ENODEV;
	dev->mmio = pci_ioremap_bar(pci_dev, 0);

	if (!dev->mmio)
		goto error7;

	error = -ENOMEM;
	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);

	if (!dev->tmp_buffer)
		goto error8;

	init_completion(&dev->dma_done);

	/* freezable so card-detect work doesn't race suspend */
	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);

	if (!dev->card_workqueue)
		goto error9;

	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);

	/* shutdown everything - precaution */
	r852_engine_disable(dev);
	r852_disable_irqs(dev);

	r852_dma_test(dev);

	dev->irq = pci_dev->irq;
	spin_lock_init(&dev->irqlock);

	dev->card_detected = 0;
	r852_card_update_present(dev);

	/* register irq handler */
	error = -ENODEV;
	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
			  DRV_NAME, dev))
		goto error10;

	/* kick initial present test */
	queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, 0);


	pr_notice("driver loaded successfully\n");
	return 0;

error10:
	destroy_workqueue(dev->card_workqueue);
error9:
	kfree(dev->tmp_buffer);
error8:
	pci_iounmap(pci_dev, dev->mmio);
error7:
	dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
			  dev->phys_bounce_buffer);
error6:
	kfree(dev);
error5:
	kfree(chip);
error4:
	pci_release_regions(pci_dev);
error3:
	/* nothing to undo for dma_set_mask — fall through */
error2:
	pci_disable_device(pci_dev);
error1:
	return error;
}
/* PCI remove: tear everything down in reverse order of probe. */
static void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop detect workqueue -
	   we are going to unregister the device anyway */
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might make more IO */
	r852_unregister_nand_device(dev);

	/* Stop interrupts */
	r852_disable_irqs(dev);
	free_irq(dev->irq, dev);

	/* Cleanup */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
			  dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}
/* PCI shutdown: quiesce the device without freeing resources. */
static void r852_shutdown(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	cancel_delayed_work_sync(&dev->card_detect_work);
	r852_disable_irqs(dev);
	/* make sure a running handler has finished before disabling */
	synchronize_irq(dev->irq);
	pci_disable_device(pci_dev);
}
#ifdef CONFIG_PM_SLEEP
/* System suspend: refuse while the card is in active use, otherwise
   stop the detect work, IRQs and the nand engine. */
static int r852_suspend(struct device *device)
{
	struct r852_device *dev = dev_get_drvdata(device);

	/* card currently selected/active — can't suspend mid-operation */
	if (dev->ctlreg & R852_CTL_CARDENABLE)
		return -EBUSY;

	/* First make sure the detect work is gone */
	cancel_delayed_work_sync(&dev->card_detect_work);

	/* Turn off the interrupts and stop the device */
	r852_disable_irqs(dev);
	r852_engine_disable(dev);

	/* If card was pulled off just during the suspend, which is very
	   unlikely, we will remove it on resume, it's too late now
	   anyway... */
	dev->card_unstable = 0;
	return 0;
}

/* System resume: re-sample card presence; either schedule a full
   (un)register through the detect work, or re-init the still-
   registered card and re-arm card-detect interrupts. */
static int r852_resume(struct device *device)
{
	struct r852_device *dev = dev_get_drvdata(device);

	r852_disable_irqs(dev);
	r852_card_update_present(dev);
	r852_engine_disable(dev);


	/* If card status changed, just do the work */
	if (dev->card_detected != dev->card_registered) {
		dbg("card was %s during low power state",
			dev->card_detected ? "added" : "removed");

		queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, msecs_to_jiffies(1000));
		return 0;
	}

	/* Otherwise, initialize the card */
	if (dev->card_registered) {
		r852_engine_enable(dev);
		nand_select_target(dev->chip, 0);
		nand_reset_op(dev->chip);
		nand_deselect_target(dev->chip);
	}

	/* Program card detection IRQ */
	r852_update_card_detect(dev);
	return 0;
}
#endif
/* Single supported device: Ricoh R5C852 xD controller function. */
static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};

MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);

/* pm_ops resolve to NULL callbacks when CONFIG_PM_SLEEP is unset */
static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);

static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};

module_pci_driver(r852_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <[email protected]>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");