
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <[email protected]>
 */

#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH	8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
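
/*
 * Example: a minimal sketch of how a controller driver's ->exec_op() might
 * use the two DMA helpers above. The foo_* controller and its
 * foo_do_dma_transfer() callback are hypothetical, purely for illustration.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_do_dma_transfer(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */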

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	bool op_is_dtr =
		op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;

	if (op_is_dtr) {
		if (!spi_mem_controller_is_capable(ctlr, dtr))
			return false;

		if (op->cmd.nbytes != 2)
			return false;
	} else {
		if (op->cmd.nbytes != 1)
			return false;
	}

	if (op->data.ecc) {
		if (!spi_mem_controller_is_capable(ctlr, ecc))
			return false;
	}

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers support only Single or Dual I/O, others might support only
 * specific opcodes, or it can even be that the controller and device both
 * support Quad I/O but the hardware prevents you from using it because only
 * 2 I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
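
/*
 * Example: a minimal sketch of a flash driver falling back to a slower read
 * opcode when the fast one is rejected. The opcodes (0x6b quad-output read,
 * 0x03 basic read) follow common SPI-NOR conventions and are assumptions
 * here, not something this file mandates.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, addr, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		op = (struct spi_mem_op)
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				   SPI_MEM_OP_ADDR(3, addr, 1),
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, buf, 1));
 */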

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR and DUMMY cycles with
	 * kzalloc() so we're guaranteed that this buffer is DMA-able, as
	 * required by the SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
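
/*
 * Example: a minimal sketch reading a JEDEC ID with spi_mem_exec_op(). The
 * 0x9f opcode is the common JEDEC READ ID convention, assumed here for
 * illustration. Per spi_mem_check_op(), @id must not live on the stack.
 *
 *	static int example_read_jedec_id(struct spi_mem *mem, u8 *id)
 *	{
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *				   SPI_MEM_OP_NO_ADDR,
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *		return spi_mem_exec_op(mem, &op);
 *	}
 */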

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
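
/*
 * Example: a minimal sketch of the adjust-then-execute loop callers are
 * expected to implement, since spi_mem_adjust_op_size() may shrink
 * op.data.nbytes to what the controller can handle in one go.
 *
 *	while (len) {
 *		op.addr.val = addr;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		addr += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */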

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
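
/*
 * Example: a minimal sketch creating a read direct mapping over a
 * hypothetical 16 MiB device. The 0x0b fast-read opcode is a common SPI-NOR
 * convention, assumed for illustration; addr.val and data.nbytes are left
 * zero in the template and get filled in on each access.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */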

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
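
/*
 * Example: a minimal sketch of the retry loop callers need, since
 * spi_mem_dirmap_read() may return fewer bytes than requested.
 *
 *	while (len) {
 *		ssize_t nbytes = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (nbytes < 0)
 *			return nbytes;
 *		if (!nbytes)
 *			return -EIO;
 *
 *		offs += nbytes;
 *		buf += nbytes;
 *		len -= nbytes;
 *	}
 */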

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_read_status(struct spi_mem *mem,
			       const struct spi_mem_op *op,
			       u16 *status)
{
	const u8 *bytes = (u8 *)op->data.buf.in;
	int ret;

	ret = spi_mem_exec_op(mem, op);
	if (ret)
		return ret;

	if (op->data.nbytes > 1)
		*status = ((u16)bytes[0] << 8) | bytes[1];
	else
		*status = bytes[0];

	return 0;
}

/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 *	   -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
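
/*
 * Example: a minimal sketch waiting for a NOR-style write-in-progress bit to
 * clear. Opcode 0x05 (read status register) and the BIT(0) busy bit are
 * common flash conventions, assumed for illustration; @status must point to
 * DMA-able memory.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(1, status, 1));
 *
 *	ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 100, 20, 400);
 */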

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static void spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		memdrv->remove(mem);
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
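
/*
 * Example: a minimal sketch of registering a SPI memory driver through the
 * module_spi_mem_driver() helper from <linux/spi/spi-mem.h>. The driver name
 * and probe body are hypothetical.
 *
 *	static int foo_flash_probe(struct spi_mem *mem)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_mem_driver foo_flash_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-flash",
 *			},
 *		},
 *		.probe = foo_flash_probe,
 *	};
 *	module_spi_mem_driver(foo_flash_driver);
 */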