wcd-spi.c

/*
 * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/component.h>
#include <linux/ratelimit.h>
#include <sound/wcd-dsp-mgr.h>
#include <sound/wcd-spi.h>
#include "wcd-spi-registers.h"

/* Byte manipulations */
#define SHIFT_1_BYTES	(8)
#define SHIFT_2_BYTES	(16)
#define SHIFT_3_BYTES	(24)

/* Command opcodes */
#define WCD_SPI_CMD_NOP		(0x00)
#define WCD_SPI_CMD_WREN	(0x06)
#define WCD_SPI_CMD_CLKREQ	(0xDA)
#define WCD_SPI_CMD_RDSR	(0x05)
#define WCD_SPI_CMD_IRR		(0x81)
#define WCD_SPI_CMD_IRW		(0x82)
#define WCD_SPI_CMD_MIOR	(0x83)
#define WCD_SPI_CMD_FREAD	(0x0B)
#define WCD_SPI_CMD_MIOW	(0x02)
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)

/* Command lengths */
#define WCD_SPI_OPCODE_LEN		(0x01)
#define WCD_SPI_CMD_NOP_LEN		(0x01)
#define WCD_SPI_CMD_WREN_LEN		(0x01)
#define WCD_SPI_CMD_CLKREQ_LEN		(0x04)
#define WCD_SPI_CMD_IRR_LEN		(0x04)
#define WCD_SPI_CMD_IRW_LEN		(0x06)
#define WCD_SPI_WRITE_SINGLE_LEN	(0x08)
#define WCD_SPI_READ_SINGLE_LEN		(0x13)
#define WCD_SPI_CMD_FREAD_LEN		(0x13)

/* Command delays */
#define WCD_SPI_CLKREQ_DELAY_USECS	(500)
#define WCD_SPI_CLK_OFF_TIMER_MS	(500)
#define WCD_SPI_RESUME_TIMEOUT_MS	100

/* Command masks */
#define WCD_CMD_ADDR_MASK		\
	(0xFF |				\
	 (0xFF << SHIFT_1_BYTES) |	\
	 (0xFF << SHIFT_2_BYTES))
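
/*
 * A memory-access command frame is a single 32-bit word: the command
 * opcode (MIOW, MIOR or FREAD) sits in the most significant byte and
 * the lower three bytes carry the 24-bit remote address. For example,
 * a write frame is built as:
 *
 *	frame = WCD_SPI_WRITE_FRAME_OPCODE |
 *		(remote_addr & WCD_CMD_ADDR_MASK);
 *	frame = cpu_to_be32(frame);
 *
 * so the opcode byte is the first one shifted out on the wire.
 */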

/* Clock ctrl request related */
#define WCD_SPI_CLK_ENABLE	true
#define WCD_SPI_CLK_DISABLE	false
#define WCD_SPI_CLK_FLAG_DELAYED	(1 << 0)
#define WCD_SPI_CLK_FLAG_IMMEDIATE	(1 << 1)

/* Internal addresses */
#define WCD_SPI_ADDR_IPC_CTL_HOST	(0x012014)

/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT		(4)
#define WCD_SPI_RW_MULTI_MIN_LEN	(16)

/* Max size is 32 bytes less than 64 KB */
#define WCD_SPI_RW_MULTI_MAX_LEN	((64 * 1024) - 32)

/*
 * Max size for the pre-allocated buffers is the max
 * possible read/write length + 32 bytes for the SPI
 * read/write command header itself.
 */
#define WCD_SPI_RW_MAX_BUF_SIZE		(WCD_SPI_RW_MULTI_MAX_LEN + 32)

/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN		WCD_SPI_WORD_BYTE_CNT
#define WCD_SPI_RW_MULTI_ALIGN		(16)

/* Status mask bits */
#define WCD_SPI_CLK_STATE_ENABLED	BIT(0)
#define WCD_SPI_IS_SUSPENDED		BIT(1)

/* Locking related */
#define WCD_SPI_MUTEX_LOCK(spi, lock)			\
{							\
	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n",	\
		 __func__, __stringify_1(lock));	\
	mutex_lock(&lock);				\
}

#define WCD_SPI_MUTEX_UNLOCK(spi, lock)			\
{							\
	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n",	\
		 __func__, __stringify_1(lock));	\
	mutex_unlock(&lock);				\
}

struct wcd_spi_debug_data {
	struct dentry *dir;
	u32 addr;
	u32 size;
};

struct wcd_spi_priv {
	struct spi_device *spi;
	u32 mem_base_addr;
	struct regmap *regmap;

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;

	/* Clock requests related */
	struct mutex clk_mutex;
	int clk_users;
	unsigned long status_mask;
	struct delayed_work clk_dwork;

	/* Transaction related */
	struct mutex xfer_mutex;

	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;
};

enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};

static char *wcd_spi_xfer_req_str(enum xfer_request req)
{
	if (req == WCD_SPI_XFER_WRITE)
		return "xfer_write";
	else if (req == WCD_SPI_XFER_READ)
		return "xfer_read";
	else
		return "xfer_invalid";
}

static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
{
	xfer->tx_buf = NULL;
	xfer->rx_buf = NULL;
	xfer->delay_usecs = 0;
	xfer->len = 0;
}

static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
{
	return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
}

static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;

	if (wcd_spi->clk_users > 0 ||
	    test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
		dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
			__func__, wcd_spi->clk_users);
		return false;
	}

	return true;
}

static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/*
	 * If suspended, wait for the resume to happen. Drop clk_mutex
	 * while waiting, since wcd_spi_resume() must acquire it in
	 * order to signal resume_comp.
	 */
	reinit_completion(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
				msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
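
/*
 * A single-word read is issued as two chained transfers on msg2: a
 * WCD_SPI_READ_SINGLE_LEN (19 byte) tx transfer carrying the 4-byte
 * command frame, followed by a 4-byte rx transfer for the data word.
 * The tx length exceeding the frame size presumably gives the slave
 * the extra clock cycles it needs to fetch the word before the data
 * is read back.
 */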

static int wcd_spi_read_single(struct spi_device *spi,
			       u32 remote_addr, u32 *val)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
		__func__, remote_addr);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	frame |= WCD_SPI_READ_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	wcd_spi_reinit_xfer(tx_xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = val;
	rx_xfer->len = sizeof(*val);

	ret = spi_sync(spi, &wcd_spi->msg2);

	return ret;
}

static int wcd_spi_read_multi(struct spi_device *spi,
			      u32 remote_addr, u8 *data,
			      size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	if (!tx_buf || !rx_buf) {
		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
			(!tx_buf) ? "tx_buf" : "rx_buf");
		return -ENOMEM;
	}

	wcd_spi_reinit_xfer(xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	xfer->tx_buf = tx_buf;
	xfer->rx_buf = rx_buf;
	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret) {
		dev_err(&spi->dev, "%s: failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
done:
	return ret;
}

static int wcd_spi_write_single(struct spi_device *spi,
				u32 remote_addr, u32 val)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 buf[WCD_SPI_WRITE_SINGLE_LEN];
	u32 frame = 0;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
		__func__, remote_addr, val);

	memset(buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	memcpy(buf, &frame, sizeof(frame));
	memcpy(buf + sizeof(frame), &val, sizeof(val));

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = buf;
	xfer->len = WCD_SPI_WRITE_SINGLE_LEN;

	return spi_sync(spi, &wcd_spi->msg1);
}

static int wcd_spi_write_multi(struct spi_device *spi,
			       u32 remote_addr, u8 *data,
			       size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u32 frame = 0;
	u8 *tx_buf = wcd_spi->tx_buf;
	int xfer_len, ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	xfer_len = len + sizeof(frame);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	memcpy(tx_buf, &frame, sizeof(frame));
	memcpy(tx_buf + sizeof(frame), data, len);

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = xfer_len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed, addr = 0x%x, len = %zd\n",
			__func__, remote_addr, len);
	return ret;
}

static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
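
/*
 * Illustrative split (values not from the source): a 40-byte transfer
 * to address 0x2008 starts 8 bytes short of 16-byte alignment, so the
 * first two words go out as single transfers (0x2008, 0x200C). The
 * remaining 32 bytes at the now-aligned 0x2010 are well under
 * WCD_SPI_RW_MULTI_MAX_LEN and are a multiple of
 * WCD_SPI_RW_MULTI_MIN_LEN, so they go out as one 32-byte multi
 * transfer, leaving no tail of single-word transfers.
 */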

static int wcd_spi_cmd_nop(struct spi_device *spi)
{
	u8 nop = WCD_SPI_CMD_NOP;

	return spi_write(spi, &nop, WCD_SPI_CMD_NOP_LEN);
}

static int wcd_spi_cmd_clkreq(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
		WCD_SPI_CMD_CLKREQ,
		0xBA, 0x80, 0x00};

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = cmd;
	xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
	xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;

	return spi_sync(spi, &wcd_spi->msg1);
}

static int wcd_spi_cmd_wr_en(struct spi_device *spi)
{
	u8 wr_en = WCD_SPI_CMD_WREN;

	return spi_write(spi, &wr_en, WCD_SPI_CMD_WREN_LEN);
}

static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 rdsr_cmd;
	u32 status;
	int ret;

	rdsr_cmd = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = &rdsr_cmd;
	tx_xfer->len = sizeof(rdsr_cmd);

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = &status;
	rx_xfer->len = sizeof(status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	*rdsr_status = be32_to_cpu(status);

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		__func__, *rdsr_status);
done:
	return ret;
}
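
/*
 * Clock-enable handshake: a NOP, then the CLK_REQ command (followed by
 * a 500 us delay on the bus), another NOP, and finally RDSR to verify
 * that the slave is actually driving data; a zero RDSR value is
 * treated as a failed clock request.
 */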

static int wcd_spi_clk_enable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;
	u32 rd_status = 0;

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_clkreq(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	wcd_spi_cmd_rdsr(spi, &rd_status);
	/*
	 * A read status of zero means reads are not happening
	 * on the bus, possibly because the clock request failed.
	 */
	if (rd_status) {
		set_bit(WCD_SPI_CLK_STATE_ENABLED,
			&wcd_spi->status_mask);
	} else {
		dev_err(&spi->dev, "%s: RDSR status is zero\n",
			__func__);
		ret = -EIO;
	}
done:
	return ret;
}

static int wcd_spi_clk_disable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
			__func__, ret);
	else
		clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);

	return ret;
}
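
/*
 * wcd_spi_clk_ctrl() implements a reference-counted clock vote: every
 * enable bumps clk_users and every disable drops it, so the slave
 * clock is only enabled for the first voter and disabled for the
 * last. A delayed disable (WCD_SPI_CLK_FLAG_DELAYED) defers the
 * turn-off by WCD_SPI_CLK_OFF_TIMER_MS so that back-to-back transfers
 * skip the enable handshake; an immediate disable turns the clock off
 * synchronously.
 */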

static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/* Cancel the disable clk work */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If the clk state is already set,
		 * then the clk wasn't really disabled.
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);
	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and the clock
		 * needs to be disabled. Disable it based on the flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}

static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * If spi_init is called after component deinit, the hardware
	 * register state may have been reset as well. Sync the regcache
	 * here so the hardware state is updated to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}

static void wcd_spi_clk_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wcd_spi_priv *wcd_spi;
	struct spi_device *spi;
	int ret;

	dwork = to_delayed_work(work);
	wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
	spi = wcd_spi->spi;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	ret = wcd_spi_clk_disable(spi);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed to disable clk, err = %d\n",
			__func__, ret);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
}

static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s: addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s: len 0x%zx is not a multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi, msg->remote_addr,
						  (u32 *)msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);

	return ret;
}

static int wcd_spi_data_xfer(struct spi_device *spi,
			     struct wcd_spi_msg *msg,
			     enum xfer_request req)
{
	int ret, ret1;

	if (msg->len <= 0) {
		dev_err(&spi->dev, "%s: Invalid size %zd\n",
			__func__, msg->len);
		return -EINVAL;
	}

	/* Request the clock */
	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: clk enable failed %d\n",
			__func__, ret);
		goto done;
	}

	/* Perform the transaction */
	ret = __wcd_spi_data_xfer(spi, msg, req);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
			__func__, wcd_spi_xfer_req_str(req),
			msg->remote_addr, msg->len, ret);

	/* Release the clock even if the xfer failed */
	ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				WCD_SPI_CLK_FLAG_DELAYED);
	if (ret1 < 0)
		dev_err(&spi->dev, "%s: clk disable failed %d\n",
			__func__, ret1);
done:
	return ret;
}

/*
 * wcd_spi_data_write: Write data to WCD SPI
 * @spi: spi_device struct
 * @msg: msg that needs to be written to WCD
 *
 * This API writes @msg->len bytes of data to the address specified in
 * @msg. The write size must be a multiple of 4 bytes and the write
 * address must be 4-byte aligned.
 */
static int wcd_spi_data_write(struct spi_device *spi,
			      struct wcd_spi_msg *msg)
{
	if (!spi || !msg) {
		pr_err("%s: Invalid %s\n", __func__,
		       (!spi) ? "spi device" : "msg");
		return -EINVAL;
	}

	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
			    __func__, msg->remote_addr, msg->len);
	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
}

/*
 * wcd_spi_data_read: Read data from WCD SPI
 * @spi: spi_device struct
 * @msg: msg that needs to be read from WCD
 *
 * This API reads @msg->len bytes of data from the address specified in
 * @msg. The read size must be a multiple of 4 bytes and the read
 * address must be 4-byte aligned.
 */
static int wcd_spi_data_read(struct spi_device *spi,
			     struct wcd_spi_msg *msg)
{
	if (!spi || !msg) {
		pr_err("%s: Invalid %s\n", __func__,
		       (!spi) ? "spi device" : "msg");
		return -EINVAL;
	}

	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
			    __func__, msg->remote_addr, msg->len);
	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
}
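
/*
 * A minimal caller-side sketch (illustrative values; the ops struct
 * is handed out through WDSP_EVENT_GET_DEVOPS below):
 *
 *	struct wcd_spi_msg msg = {
 *		.remote_addr = 0x00100000,
 *		.data = buf,
 *		.len = 64,
 *		.flags = 0,
 *	};
 *	ret = spi_ops->read_dev(spi_ops->spi_dev, &msg);
 *
 * remote_addr must be 4-byte aligned and len a multiple of 4 bytes,
 * per the checks in __wcd_spi_data_xfer().
 */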

static int wdsp_spi_dload_section(struct spi_device *spi,
				  void *data)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;
	int ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, sec->addr, sec->size);

	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;
	msg.len = sec->size;

	ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
	if (ret < 0)
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
	return ret;
}

static int wdsp_spi_read_section(struct spi_device *spi, void *data)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;
	int ret;

	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;
	msg.len = sec->size;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, msg.remote_addr, msg.len);

	ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
	if (ret < 0)
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
	return ret;
}

static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_POST_SHUTDOWN:
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	case WDSP_EVENT_SUSPEND:
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	case WDSP_EVENT_GET_DEVOPS:
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}

		spi_ops = (struct wcd_spi_ops *)data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
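
/*
 * The slave's own register file is accessed with the IRR/IRW opcodes
 * rather than the memory frames above. An IRW command is 6 bytes:
 * 1 opcode byte + 1 register byte (reg_bits = 8) + 4 value bytes
 * (val_bits = 32), which is exactly WCD_SPI_CMD_IRW_LEN.
 */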

static int wcd_spi_bus_gwrite(void *context, const void *reg,
			      size_t reg_len, const void *val,
			      size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 tx_buf[WCD_SPI_CMD_IRW_LEN];

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	tx_buf[0] = WCD_SPI_CMD_IRW;
	tx_buf[1] = *((u8 *)reg);
	memcpy(&tx_buf[WCD_SPI_OPCODE_LEN + reg_len],
	       val, val_len);

	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
}

static int wcd_spi_bus_write(void *context, const void *data,
			     size_t count)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
		dev_err(&spi->dev, "%s: Invalid size %zd\n",
			__func__, count);
		WARN_ON(1);
		return -EINVAL;
	}

	return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
				  data + wcd_spi->reg_bytes,
				  count - wcd_spi->reg_bytes);
}

static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 tx_buf[WCD_SPI_CMD_IRR_LEN];

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	memset(tx_buf, 0, WCD_SPI_OPCODE_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = val;
	rx_xfer->len = val_len;

	return spi_sync(spi, &wcd_spi->msg2);
}
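
/*
 * Regmap bus for the slave register file. The big-endian value format
 * is consistent with the cpu_to_be32()/be32_to_cpu() conversions used
 * for the memory command frames elsewhere in this driver.
 */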
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};

static int wcd_spi_state_show(struct seq_file *f, void *ptr)
{
	struct spi_device *spi = f->private;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	const char *clk_state, *clk_mutex, *xfer_mutex;

	if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
		clk_state = "enabled";
	else
		clk_state = "disabled";

	clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
		    "locked" : "unlocked";

	xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
		     "locked" : "unlocked";

	seq_printf(f, "clk_state = %s\nclk_users = %d\n"
		   "clk_mutex = %s\nxfer_mutex = %s\n",
		   clk_state, wcd_spi->clk_users, clk_mutex,
		   xfer_mutex);
	return 0;
}

static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}

static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;
	char *buf;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		dev_err(&spi->dev,
			"%s: Invalid request, size = %u, addr = 0x%x\n",
			__func__, dbg_data->size, dbg_data->addr);
		return 0;
	}

	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.data = buf;
	msg.remote_addr = dbg_data->addr;
	msg.len = buf_size;
	msg.flags = 0;

	ret = wcd_spi_data_read(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev,
			"%s: Failed to read %zu bytes from addr 0x%x\n",
			__func__, buf_size, msg.remote_addr);
		goto done;
	}

	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);

done:
	kfree(buf);
	if (ret < 0)
		return ret;
	else
		return read_count;
}

static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};

static int wcd_spi_debugfs_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	int rc = 0;

	dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
	if (IS_ERR_OR_NULL(dbg_data->dir)) {
		dbg_data->dir = NULL;
		rc = -ENODEV;
		goto done;
	}

	debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
	debugfs_create_u32("addr", 0644, dbg_data->dir,
			   &dbg_data->addr);
	debugfs_create_u32("size", 0644, dbg_data->dir,
			   &dbg_data->size);
	debugfs_create_file("mem_read", 0444, dbg_data->dir,
			    spi, &mem_read_fops);
done:
	return rc;
}
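
/*
 * Example debugfs usage from a shell, assuming debugfs is mounted at
 * /sys/kernel/debug (addr and size values are illustrative):
 *
 *	echo 0x100000 > /sys/kernel/debug/wcd_spi/addr
 *	echo 64 > /sys/kernel/debug/wcd_spi/size
 *	cat /sys/kernel/debug/wcd_spi/mem_read | xxd
 *	cat /sys/kernel/debug/wcd_spi/state
 */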

static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};

static bool wcd_spi_is_volatile_reg(struct device *dev,
				    unsigned int reg)
{
	switch (reg) {
	case WCD_SPI_SLAVE_SANITY:
	case WCD_SPI_SLAVE_STATUS:
	case WCD_SPI_SLAVE_IRQ_STATUS:
	case WCD_SPI_SLAVE_TX:
	case WCD_SPI_SLAVE_SW_RST_IRQ:
	case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
	case WCD_SPI_SLAVE_FIFO_LEVEL:
	case WCD_SPI_SLAVE_GENERICS:
		return true;
	}

	return false;
}

static bool wcd_spi_is_readable_reg(struct device *dev,
				    unsigned int reg)
{
	switch (reg) {
	case WCD_SPI_SLAVE_SW_RESET:
	case WCD_SPI_SLAVE_IRQ_CLR:
	case WCD_SPI_SLAVE_IRQ_FORCE:
		return false;
	}

	return true;
}

static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};

static int wdsp_spi_init(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = wcd_spi_init(spi);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Init failed, err = %d\n",
			__func__, ret);
	return ret;
}

static int wdsp_spi_deinit(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/*
	 * Deinit means the hardware is reset. Mark the cache
	 * as dirty here, so init will sync the cache.
	 */
	regcache_mark_dirty(wcd_spi->regmap);
	return 0;
}
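
/*
 * Component ops registered with the WDSP manager (through
 * m_ops->register_cmpnt_ops() in the component bind below): the
 * manager drives init/deinit around firmware bring-up and delivers
 * the WDSP_EVENT_* callbacks to wdsp_spi_event_handler().
 */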
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};

static int wcd_spi_component_bind(struct device *dev,
				  struct device *master,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;

	wcd_spi->m_dev = master;
	wcd_spi->m_ops = data;

	if (wcd_spi->m_ops &&
	    wcd_spi->m_ops->register_cmpnt_ops)
		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
							 wcd_spi,
							 &wdsp_spi_ops);
	if (ret) {
		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);

	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
					   &spi->dev, &wcd_spi_regmap_cfg);
	if (IS_ERR(wcd_spi->regmap)) {
		ret = PTR_ERR(wcd_spi->regmap);
		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
			__func__, ret);
		goto done;
	}

	if (wcd_spi_debugfs_init(spi))
		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);

	spi_message_init(&wcd_spi->msg1);
	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);

	spi_message_init(&wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);

	/* Pre-allocate the DMA-safe buffers used for all transfers */
	wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
				  GFP_KERNEL | GFP_DMA);
	if (!wcd_spi->tx_buf) {
		ret = -ENOMEM;
		goto done;
	}

	wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
				  GFP_KERNEL | GFP_DMA);
	if (!wcd_spi->rx_buf) {
		kfree(wcd_spi->tx_buf);
		wcd_spi->tx_buf = NULL;
		ret = -ENOMEM;
		goto done;
	}
done:
	return ret;
}

static void wcd_spi_component_unbind(struct device *dev,
				     struct device *master,
				     void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	wcd_spi->m_dev = NULL;
	wcd_spi->m_ops = NULL;

	spi_transfer_del(&wcd_spi->xfer1);
	spi_transfer_del(&wcd_spi->xfer2[0]);
	spi_transfer_del(&wcd_spi->xfer2[1]);

	kfree(wcd_spi->tx_buf);
	kfree(wcd_spi->rx_buf);
	wcd_spi->tx_buf = NULL;
	wcd_spi->rx_buf = NULL;
}

static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};

static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry\n",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);

	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}

static int wcd_spi_remove(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	component_del(&spi->dev, &wcd_spi_component_ops);

	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);

	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But we still need to notify
	 * the master to make sure all other components can
	 * suspend as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}

static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}

static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
#endif

static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);

static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");