// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/component.h>
#include <linux/ratelimit.h>
#include <linux/platform_device.h>
#include <sound/wcd-dsp-mgr.h>
#include <sound/wcd-spi.h>
#include <soc/wcd-spi-ac.h>
#include "wcd-spi-registers.h"

/* Byte manipulations */
#define SHIFT_1_BYTES	(8)
#define SHIFT_2_BYTES	(16)
#define SHIFT_3_BYTES	(24)

/* Command opcodes */
#define WCD_SPI_CMD_NOP		(0x00)
#define WCD_SPI_CMD_WREN	(0x06)
#define WCD_SPI_CMD_CLKREQ	(0xDA)
#define WCD_SPI_CMD_RDSR	(0x05)
#define WCD_SPI_CMD_IRR		(0x81)
#define WCD_SPI_CMD_IRW		(0x82)
#define WCD_SPI_CMD_MIOR	(0x83)
#define WCD_SPI_CMD_FREAD	(0x0B)
#define WCD_SPI_CMD_MIOW	(0x02)
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)
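
/*
 * Illustrative sketch (not part of the driver): every multi-I/O frame
 * packs the 8-bit opcode into the top byte and the 24-bit remote
 * address (masked by WCD_CMD_ADDR_MASK, defined below) into the lower
 * three bytes, then goes out big-endian. For example, a MIOR read of
 * remote address 0x012014 would be framed as:
 *
 *	u32 frame = WCD_SPI_READ_FRAME_OPCODE |	    // 0x83000000
 *		    (0x012014 & WCD_CMD_ADDR_MASK); // 0x00012014
 *	frame = cpu_to_be32(frame);  // 0x83 0x01 0x20 0x14 on the wire
 *
 * The address value is only an example (it reuses
 * WCD_SPI_ADDR_IPC_CTL_HOST defined below).
 */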

/* Command lengths */
#define WCD_SPI_OPCODE_LEN	(0x01)
#define WCD_SPI_CMD_NOP_LEN	(0x01)
#define WCD_SPI_CMD_WREN_LEN	(0x01)
#define WCD_SPI_CMD_CLKREQ_LEN	(0x04)
#define WCD_SPI_CMD_IRR_LEN	(0x04)
#define WCD_SPI_CMD_IRW_LEN	(0x06)
#define WCD_SPI_WRITE_SINGLE_LEN	(0x08)
#define WCD_SPI_READ_SINGLE_LEN		(0x13)
#define WCD_SPI_CMD_FREAD_LEN		(0x13)

/* Command delays */
#define WCD_SPI_CLKREQ_DELAY_USECS	(500)
#define WCD_SPI_CLK_OFF_TIMER_MS	(500)
#define WCD_SPI_RESUME_TIMEOUT_MS	100

/* Command masks */
#define WCD_CMD_ADDR_MASK		\
	(0xFF |				\
	 (0xFF << SHIFT_1_BYTES) |	\
	 (0xFF << SHIFT_2_BYTES))

/* Clock ctrl request related */
#define WCD_SPI_CLK_ENABLE	true
#define WCD_SPI_CLK_DISABLE	false
#define WCD_SPI_CLK_FLAG_DELAYED	(1 << 0)
#define WCD_SPI_CLK_FLAG_IMMEDIATE	(1 << 1)

/* Internal addresses */
#define WCD_SPI_ADDR_IPC_CTL_HOST	(0x012014)

/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT		(4)
#define WCD_SPI_RW_MULTI_MIN_LEN	(16)

/* Max size is 32 bytes less than 64 KB */
#define WCD_SPI_RW_MULTI_MAX_LEN	((64 * 1024) - 32)

/*
 * Max size for the pre-allocated buffers is the max
 * possible read/write length + 32 bytes for the SPI
 * read/write command header itself.
 */
#define WCD_SPI_RW_MAX_BUF_SIZE	(WCD_SPI_RW_MULTI_MAX_LEN + 32)

/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN	WCD_SPI_WORD_BYTE_CNT
#define WCD_SPI_RW_MULTI_ALIGN	(16)

/* Status mask bits */
#define WCD_SPI_CLK_STATE_ENABLED	BIT(0)
#define WCD_SPI_IS_SUSPENDED		BIT(1)

/* Locking related */
#define WCD_SPI_MUTEX_LOCK(spi, lock)			\
{							\
	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n",	\
		 __func__, __stringify_1(lock));	\
	mutex_lock(&lock);				\
}

#define WCD_SPI_MUTEX_UNLOCK(spi, lock)			\
{							\
	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n",	\
		 __func__, __stringify_1(lock));	\
	mutex_unlock(&lock);				\
}

struct wcd_spi_debug_data {
	struct dentry *dir;
	u32 addr;
	u32 size;
};

struct wcd_spi_priv {
	struct spi_device *spi;
	u32 mem_base_addr;
	struct regmap *regmap;

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;

	/* Clock requests related */
	struct mutex clk_mutex;
	int clk_users;
	unsigned long status_mask;
	struct delayed_work clk_dwork;

	/* Transaction related */
	struct mutex xfer_mutex;

	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;

	/* DMA handles for transfer buffers */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;

	/* Handle to child (qmi client) device */
	struct device *ac_dev;
};

enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};

static char *wcd_spi_xfer_req_str(enum xfer_request req)
{
	if (req == WCD_SPI_XFER_WRITE)
		return "xfer_write";
	else if (req == WCD_SPI_XFER_READ)
		return "xfer_read";
	else
		return "xfer_invalid";
}

static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
{
	xfer->tx_buf = NULL;
	xfer->rx_buf = NULL;
	xfer->delay_usecs = 0;
	xfer->len = 0;
}

static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
{
	return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
}

static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;

	if (wcd_spi->clk_users > 0 ||
	    test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
		dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
			__func__, wcd_spi->clk_users);
		return false;
	}

	return true;
}

static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended, wait for the resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}

static int wcd_spi_read_single(struct spi_device *spi,
			       u32 remote_addr, u32 *val)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
		__func__, remote_addr);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	frame |= WCD_SPI_READ_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	wcd_spi_reinit_xfer(tx_xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*val);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret)
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
	else
		memcpy((u8 *)val, rx_buf, sizeof(*val));

	return ret;
}

static int wcd_spi_read_multi(struct spi_device *spi,
			      u32 remote_addr, u8 *data,
			      size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	if (!tx_buf || !rx_buf) {
		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
			(!tx_buf) ? "tx_buf" : "rx_buf");
		return -ENOMEM;
	}

	wcd_spi_reinit_xfer(xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	xfer->tx_buf = tx_buf;
	xfer->rx_buf = rx_buf;
	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret) {
		dev_err(&spi->dev, "%s: failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
done:
	return ret;
}

static int wcd_spi_write_single(struct spi_device *spi,
				u32 remote_addr, u32 val)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u32 frame = 0;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
		__func__, remote_addr, val);

	memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	memcpy(tx_buf + sizeof(frame), &val, sizeof(val));

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = WCD_SPI_WRITE_SINGLE_LEN;

	return spi_sync(spi, &wcd_spi->msg1);
}

static int wcd_spi_write_multi(struct spi_device *spi,
			       u32 remote_addr, u8 *data,
			       size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u32 frame = 0;
	u8 *tx_buf = wcd_spi->tx_buf;
	int xfer_len, ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	xfer_len = len + sizeof(frame);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	memcpy(tx_buf, &frame, sizeof(frame));
	memcpy(tx_buf + sizeof(frame), data, len);

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = xfer_len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed, addr = 0x%x, len = %zd\n",
			__func__, remote_addr, len);
	return ret;
}

static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single transfers until multi-word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi transfers in chunks of the max allowed length */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform one transfer for the largest remaining chunk that
	 * is a multiple of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single transfers for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
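
/*
 * Worked example of the split above (illustrative only, using the
 * constants as defined in this file): a 65,544-byte transfer whose
 * start address is 8 bytes past a 16-byte boundary is issued as:
 *
 *   1. two single-word transfers (2 x 4 bytes) until the address
 *      is 16-byte aligned                     -> 65,536 bytes remain
 *   2. one max-length multi transfer of
 *      WCD_SPI_RW_MULTI_MAX_LEN = 65,504 bytes ->     32 bytes remain
 *   3. one multi transfer of 32 bytes, the largest remaining multiple
 *      of WCD_SPI_RW_MULTI_MIN_LEN = 16        ->      0 bytes remain
 *
 * Any residue smaller than 16 bytes would be flushed by the final
 * single-word loop.
 */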

static int wcd_spi_cmd_nop(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 *tx_buf = wcd_spi->tx_buf;

	tx_buf[0] = WCD_SPI_CMD_NOP;

	return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
}

static int wcd_spi_cmd_clkreq(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
		WCD_SPI_CMD_CLKREQ,
		0xBA, 0x80, 0x00};

	memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
	xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;

	return spi_sync(spi, &wcd_spi->msg1);
}

static int wcd_spi_cmd_wr_en(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 *tx_buf = wcd_spi->tx_buf;

	tx_buf[0] = WCD_SPI_CMD_WREN;

	return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
}

static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret;

	tx_buf[0] = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_OPCODE_LEN;

	memset(rx_buf, 0, sizeof(*rdsr_status));
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*rdsr_status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	*rdsr_status = be32_to_cpu(*((u32 *)rx_buf));

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		__func__, *rdsr_status);
done:
	return ret;
}

static int wcd_spi_clk_enable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;
	u32 rd_status = 0;

	/* Get the SPI access first */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_REQUEST,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret) {
			dev_err(&spi->dev,
				"%s: Can't get spi access, err = %d\n",
				__func__, ret);
			return ret;
		}
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_clkreq(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	wcd_spi_cmd_rdsr(spi, &rd_status);
	/*
	 * A read status of zero means reads are not happening
	 * on the bus, possibly because the clock request failed.
	 */
	if (rd_status) {
		set_bit(WCD_SPI_CLK_STATE_ENABLED,
			&wcd_spi->status_mask);
	} else {
		dev_err(&spi->dev, "%s: RDSR status is zero\n",
			__func__);
		ret = -EIO;
	}
done:
	return ret;
}

static int wcd_spi_clk_disable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
			__func__, ret);
	/*
	 * Clear this bit even if the clock disable failed,
	 * as the source clocks might get turned off.
	 */
	clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);

	/* Once the clock is released, SPI access can be released as well */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_RELEASE,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret)
			dev_err(&spi->dev,
				"%s: SPI access release failed, err = %d\n",
				__func__, ret);
	}

	return ret;
}

static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/* Cancel the disable clk work */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If the clk state is already set,
		 * then the clk wasn't really disabled.
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);
	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and the clock
		 * needs to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
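
/*
 * Illustrative sketch (not part of the driver): clock votes are
 * reference counted, so every enable must be paired with a disable.
 * A typical internal caller brackets its bus activity like this:
 *
 *	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
 *			       WCD_SPI_CLK_FLAG_IMMEDIATE);
 *	if (!ret) {
 *		... perform SPI transfers ...
 *		wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
 *				 WCD_SPI_CLK_FLAG_DELAYED);
 *	}
 *
 * The DELAYED flag defers the actual clock release by
 * WCD_SPI_CLK_OFF_TIMER_MS (500 ms) via clk_dwork, so back-to-back
 * transfers do not pay the enable handshake each time.
 */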

static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible the hardware register state was also
	 * reset. Sync the regcache here so the hardware state
	 * is updated to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}

static void wcd_spi_clk_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wcd_spi_priv *wcd_spi;
	struct spi_device *spi;
	int ret;

	dwork = to_delayed_work(work);
	wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
	spi = wcd_spi->spi;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	ret = wcd_spi_clk_disable(spi);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed to disable clk, err = %d\n",
			__func__, ret);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
}

static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s: addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s: len 0x%zx is not a multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi, msg->remote_addr,
						  (u32 *)msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);

	return ret;
}

static int wcd_spi_data_xfer(struct spi_device *spi,
			     struct wcd_spi_msg *msg,
			     enum xfer_request req)
{
	int ret, ret1;

	if (msg->len <= 0) {
		dev_err(&spi->dev, "%s: Invalid size %zd\n",
			__func__, msg->len);
		return -EINVAL;
	}

	/* Request for clock */
	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: clk enable failed %d\n",
			__func__, ret);
		goto done;
	}

	/* Perform the transaction */
	ret = __wcd_spi_data_xfer(spi, msg, req);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
			__func__, wcd_spi_xfer_req_str(req),
			msg->remote_addr, msg->len, ret);

	/* Release the clock even if the xfer failed */
	ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				WCD_SPI_CLK_FLAG_DELAYED);
	if (ret1 < 0)
		dev_err(&spi->dev, "%s: clk disable failed %d\n",
			__func__, ret1);
done:
	return ret;
}

/*
 * wcd_spi_data_write: Write data to WCD SPI
 * @spi: spi_device struct
 * @msg: msg that needs to be written to WCD
 *
 * This API writes @msg->len bytes of data to the address specified
 * in @msg->remote_addr. The write size should be a multiple of
 * 4 bytes and the write address should be 4-byte aligned.
 */
static int wcd_spi_data_write(struct spi_device *spi,
			      struct wcd_spi_msg *msg)
{
	if (!spi || !msg) {
		pr_err("%s: Invalid %s\n", __func__,
		       (!spi) ? "spi device" : "msg");
		return -EINVAL;
	}

	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
			    __func__, msg->remote_addr, msg->len);
	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
}

/*
 * wcd_spi_data_read: Read data from WCD SPI
 * @spi: spi_device struct
 * @msg: msg that needs to be read from WCD
 *
 * This API reads @msg->len bytes of data from the address specified
 * in @msg->remote_addr. The read size should be a multiple of
 * 4 bytes and the read address should be 4-byte aligned.
 */
static int wcd_spi_data_read(struct spi_device *spi,
			     struct wcd_spi_msg *msg)
{
	if (!spi || !msg) {
		pr_err("%s: Invalid %s\n", __func__,
		       (!spi) ? "spi device" : "msg");
		return -EINVAL;
	}

	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
			    __func__, msg->remote_addr, msg->len);
	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
}
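
/*
 * Illustrative usage sketch (not part of the driver): a client that
 * obtained these callbacks through WDSP_EVENT_GET_DEVOPS (see the
 * event handler below) could issue a 4-byte-aligned read like this.
 * The address and buffer are hypothetical.
 *
 *	struct wcd_spi_msg msg = {
 *		.remote_addr = 0x00100000,	// example, 4-byte aligned
 *		.data = buf,			// caller-owned buffer
 *		.len = 64,			// multiple of 4 bytes
 *	};
 *	ret = spi_ops->read_dev(spi_ops->spi_dev, &msg);
 */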

static int wdsp_spi_dload_section(struct spi_device *spi,
				  void *data)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;
	int ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, sec->addr, sec->size);

	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;
	msg.len = sec->size;

	ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
	if (ret < 0)
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
	return ret;
}

static int wdsp_spi_read_section(struct spi_device *spi, void *data)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;
	int ret;

	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;
	msg.len = sec->size;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, msg.remote_addr, msg.len);

	ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
	if (ret < 0)
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
	return ret;
}

static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_PRE_SHUTDOWN:
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_REQUEST,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: request access failed %d\n",
					__func__, ret);
		}
		break;

	case WDSP_EVENT_POST_SHUTDOWN:
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_POST_BOOTUP:
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_RELEASE,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: release access failed %d\n",
					__func__, ret);
		}
		break;

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	case WDSP_EVENT_SUSPEND:
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	case WDSP_EVENT_GET_DEVOPS:
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}

		spi_ops = (struct wcd_spi_ops *)data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}

static int wcd_spi_bus_gwrite(void *context, const void *reg,
			      size_t reg_len, const void *val,
			      size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 *tx_buf = wcd_spi->tx_buf;

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd\n",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRW;
	tx_buf[1] = *((u8 *)reg);
	memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
	       val, val_len);

	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
}

static int wcd_spi_bus_write(void *context, const void *data,
			     size_t count)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
		dev_err(&spi->dev, "%s: Invalid size %zd\n",
			__func__, count);
		WARN_ON(1);
		return -EINVAL;
	}

	return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
				  data + wcd_spi->reg_bytes,
				  count - wcd_spi->reg_bytes);
}

static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret = 0;

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd\n",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = val_len;

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret) {
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
		goto done;
	}

	memcpy(val, rx_buf, val_len);
done:
	return ret;
}

static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
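
/*
 * Illustrative sketch (not part of the driver): with the custom bus
 * above, slave registers are accessed through the regmap created in
 * wcd_spi_component_bind(), and each access is turned into an IRR/IRW
 * command frame on the wire. For example, reading the sanity register
 * would look like:
 *
 *	unsigned int status;
 *	ret = regmap_read(wcd_spi->regmap, WCD_SPI_SLAVE_SANITY, &status);
 *	// expected value per wcd_spi_defaults[] below: 0xDEADBEEF
 */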

static int wcd_spi_state_show(struct seq_file *f, void *ptr)
{
	struct spi_device *spi = f->private;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	const char *clk_state, *clk_mutex, *xfer_mutex;

	if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
		clk_state = "enabled";
	else
		clk_state = "disabled";

	clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
		    "locked" : "unlocked";

	xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
		     "locked" : "unlocked";

	seq_printf(f, "clk_state = %s\nclk_users = %d\n"
		   "clk_mutex = %s\nxfer_mutex = %s\n",
		   clk_state, wcd_spi->clk_users, clk_mutex,
		   xfer_mutex);
	return 0;
}

static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}

static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;
	char *buf;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		dev_err(&spi->dev,
			"%s: Invalid request, size = %u, addr = 0x%x\n",
			__func__, dbg_data->size, dbg_data->addr);
		return 0;
	}

	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.data = buf;
	msg.remote_addr = dbg_data->addr;
	msg.len = buf_size;
	msg.flags = 0;

	ret = wcd_spi_data_read(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev,
			"%s: Failed to read %zu bytes from addr 0x%x\n",
			__func__, buf_size, msg.remote_addr);
		goto done;
	}

	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);

done:
	kfree(buf);
	if (ret < 0)
		return ret;
	else
		return read_count;
}

static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};

static int wcd_spi_debugfs_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	int rc = 0;

	dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
	if (IS_ERR_OR_NULL(dbg_data->dir)) {
		dbg_data->dir = NULL;
		rc = -ENODEV;
		goto done;
	}

	debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
	debugfs_create_u32("addr", 0644, dbg_data->dir,
			   &dbg_data->addr);
	debugfs_create_u32("size", 0644, dbg_data->dir,
			   &dbg_data->size);
	debugfs_create_file("mem_read", 0444, dbg_data->dir,
			    spi, &mem_read_fops);
done:
	return rc;
}
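
/*
 * Illustrative debugfs usage (not part of the driver), assuming
 * debugfs is mounted at /sys/kernel/debug and using a hypothetical
 * remote address:
 *
 *	echo 0x100000 > /sys/kernel/debug/wcd_spi/addr
 *	echo 64 > /sys/kernel/debug/wcd_spi/size
 *	cat /sys/kernel/debug/wcd_spi/mem_read > dump.bin
 *	cat /sys/kernel/debug/wcd_spi/state
 */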

static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};

static bool wcd_spi_is_volatile_reg(struct device *dev,
				    unsigned int reg)
{
	switch (reg) {
	case WCD_SPI_SLAVE_SANITY:
	case WCD_SPI_SLAVE_STATUS:
	case WCD_SPI_SLAVE_IRQ_STATUS:
	case WCD_SPI_SLAVE_TX:
	case WCD_SPI_SLAVE_SW_RST_IRQ:
	case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
	case WCD_SPI_SLAVE_FIFO_LEVEL:
	case WCD_SPI_SLAVE_GENERICS:
		return true;
	}

	return false;
}

static bool wcd_spi_is_readable_reg(struct device *dev,
				    unsigned int reg)
{
	switch (reg) {
	case WCD_SPI_SLAVE_SW_RESET:
	case WCD_SPI_SLAVE_IRQ_CLR:
	case WCD_SPI_SLAVE_IRQ_FORCE:
		return false;
	}

	return true;
}

static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};

static int wcd_spi_add_ac_dev(struct device *dev,
			      struct device_node *node)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct platform_device *pdev;
	int ret = 0;

	pdev = platform_device_alloc("wcd-spi-ac", -1);
	if (IS_ERR_OR_NULL(pdev)) {
		ret = PTR_ERR(pdev);
		dev_err(dev, "%s: pdev alloc failed, ret = %d\n",
			__func__, ret);
		return ret;
	}

	pdev->dev.parent = dev;
	pdev->dev.of_node = node;

	ret = platform_device_add(pdev);
	if (ret) {
		dev_err(dev, "%s: pdev add failed, ret = %d\n",
			__func__, ret);
		goto dealloc_pdev;
	}

	wcd_spi->ac_dev = &pdev->dev;
	return 0;

dealloc_pdev:
	platform_device_put(pdev);
	return ret;
}

static int wdsp_spi_init(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;
	struct device_node *node;

	for_each_child_of_node(dev->of_node, node) {
		if (!strcmp(node->name, "wcd_spi_ac"))
			wcd_spi_add_ac_dev(dev, node);
	}

	ret = wcd_spi_init(spi);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Init failed, err = %d\n",
			__func__, ret);
	return ret;
}

static int wdsp_spi_deinit(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/*
	 * Deinit means the hardware is reset. Mark the cache
	 * as dirty here, so init will sync the cache.
	 */
	regcache_mark_dirty(wcd_spi->regmap);

	return 0;
}

static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};

static int wcd_spi_component_bind(struct device *dev,
				  struct device *master,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;

	wcd_spi->m_dev = master;
	wcd_spi->m_ops = data;

	if (wcd_spi->m_ops &&
	    wcd_spi->m_ops->register_cmpnt_ops)
		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
							 wcd_spi,
							 &wdsp_spi_ops);
	if (ret) {
		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);

	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
					   &spi->dev, &wcd_spi_regmap_cfg);
	if (IS_ERR(wcd_spi->regmap)) {
		ret = PTR_ERR(wcd_spi->regmap);
		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
			__func__, ret);
		goto done;
	}

	if (wcd_spi_debugfs_init(spi))
		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);

	spi_message_init(&wcd_spi->msg1);
	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);

	spi_message_init(&wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);

	/* Pre-allocate the buffers */
	wcd_spi->tx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->tx_dma, GFP_KERNEL);
	if (!wcd_spi->tx_buf) {
		ret = -ENOMEM;
		goto done;
	}

	wcd_spi->rx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->rx_dma, GFP_KERNEL);
	if (!wcd_spi->rx_buf) {
		dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
				  wcd_spi->tx_buf, wcd_spi->tx_dma);
		wcd_spi->tx_buf = NULL;
		ret = -ENOMEM;
		goto done;
	}
done:
	return ret;
}

static void wcd_spi_component_unbind(struct device *dev,
				     struct device *master,
				     void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;

	debugfs_remove_recursive(dbg_data->dir);
	dbg_data->dir = NULL;

	wcd_spi->m_dev = NULL;
	wcd_spi->m_ops = NULL;

	spi_transfer_del(&wcd_spi->xfer1);
	spi_transfer_del(&wcd_spi->xfer2[0]);
	spi_transfer_del(&wcd_spi->xfer2[1]);

	dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
			  wcd_spi->tx_buf, wcd_spi->tx_dma);
	dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
			  wcd_spi->rx_buf, wcd_spi->rx_dma);
	wcd_spi->tx_buf = NULL;
	wcd_spi->rx_buf = NULL;
}

static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};

static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry\n",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);
	arch_setup_dma_ops(&spi->dev, 0, 0, NULL, true);

	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}

static int wcd_spi_remove(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	component_del(&spi->dev, &wcd_spi_component_ops);

	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);

	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But we still need to notify
	 * the master to make sure all other components can
	 * suspend as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}

static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}

static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
#endif

static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
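
/*
 * Illustrative devicetree snippet (not a binding document): the driver
 * matches "qcom,wcd-spi-v2", requires the "qcom,mem-base-addr" property
 * read in wcd_spi_probe(), and optionally hosts a "wcd_spi_ac" child
 * node for the access-control device. The address value is an example.
 *
 *	wcd_spi: wcd_spi {
 *		compatible = "qcom,wcd-spi-v2";
 *		qcom,mem-base-addr = <0x100000>;
 *
 *		wcd_spi_ac: wcd_spi_ac {
 *			...
 *		};
 *	};
 */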

static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");