  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/of.h>
  8. #include <linux/debugfs.h>
  9. #include <linux/delay.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/bitops.h>
  12. #include <linux/spi/spi.h>
  13. #include <linux/regmap.h>
  14. #include <linux/component.h>
  15. #include <linux/ratelimit.h>
  16. #include <linux/platform_device.h>
  17. #include <sound/wcd-dsp-mgr.h>
  18. #include <sound/wcd-spi.h>
  19. #include <soc/wcd-spi-ac.h>
  20. #include "wcd-spi-registers.h"
  21. /* Byte manipulations */
  22. #define SHIFT_1_BYTES (8)
  23. #define SHIFT_2_BYTES (16)
  24. #define SHIFT_3_BYTES (24)
  25. /* Command opcodes */
  26. #define WCD_SPI_CMD_NOP (0x00)
  27. #define WCD_SPI_CMD_WREN (0x06)
  28. #define WCD_SPI_CMD_CLKREQ (0xDA)
  29. #define WCD_SPI_CMD_RDSR (0x05)
  30. #define WCD_SPI_CMD_IRR (0x81)
  31. #define WCD_SPI_CMD_IRW (0x82)
  32. #define WCD_SPI_CMD_MIOR (0x83)
  33. #define WCD_SPI_CMD_FREAD (0x0B)
  34. #define WCD_SPI_CMD_MIOW (0x02)
  35. #define WCD_SPI_WRITE_FRAME_OPCODE \
  36. (WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
  37. #define WCD_SPI_READ_FRAME_OPCODE \
  38. (WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
  39. #define WCD_SPI_FREAD_FRAME_OPCODE \
  40. (WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)
  41. /* Command lengths */
  42. #define WCD_SPI_OPCODE_LEN (0x01)
  43. #define WCD_SPI_CMD_NOP_LEN (0x01)
  44. #define WCD_SPI_CMD_WREN_LEN (0x01)
  45. #define WCD_SPI_CMD_CLKREQ_LEN (0x04)
  46. #define WCD_SPI_CMD_IRR_LEN (0x04)
  47. #define WCD_SPI_CMD_IRW_LEN (0x06)
  48. #define WCD_SPI_WRITE_SINGLE_LEN (0x08)
  49. #define WCD_SPI_READ_SINGLE_LEN (0x13)
  50. #define WCD_SPI_CMD_FREAD_LEN (0x13)
  51. /* Command delays */
  52. #define WCD_SPI_CLKREQ_DELAY_USECS (500)
  53. #define WCD_SPI_CLK_OFF_TIMER_MS (500)
  54. #define WCD_SPI_RESUME_TIMEOUT_MS 100
  55. /* Command masks */
  56. #define WCD_CMD_ADDR_MASK \
  57. (0xFF | \
  58. (0xFF << SHIFT_1_BYTES) | \
  59. (0xFF << SHIFT_2_BYTES))
  60. /* Clock ctrl request related */
  61. #define WCD_SPI_CLK_ENABLE true
  62. #define WCD_SPI_CLK_DISABLE false
  63. #define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
  64. #define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)
  65. /* Internal addresses */
  66. #define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)
  67. /* Word sizes and min/max lengths */
  68. #define WCD_SPI_WORD_BYTE_CNT (4)
  69. #define WCD_SPI_RW_MULTI_MIN_LEN (16)
  70. /* Max size is 32 bytes less than 64Kbytes */
  71. #define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
  72. /*
  73. * Max size for the pre-allocated buffers is the max
  74. * possible read/write length + 32 bytes for the SPI
  75. * read/write command header itself.
  76. */
  77. #define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
  78. /* Alignment requirements */
  79. #define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
  80. #define WCD_SPI_RW_MULTI_ALIGN (16)
  81. /* Status mask bits */
  82. #define WCD_SPI_CLK_STATE_ENABLED BIT(0)
  83. #define WCD_SPI_IS_SUSPENDED BIT(1)
  84. /* Locking related */
  85. #define WCD_SPI_MUTEX_LOCK(spi, lock) \
  86. { \
  87. dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n", \
  88. __func__, __stringify_1(lock)); \
  89. mutex_lock(&lock); \
  90. }
  91. #define WCD_SPI_MUTEX_UNLOCK(spi, lock) \
  92. { \
  93. dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n", \
  94. __func__, __stringify_1(lock)); \
  95. mutex_unlock(&lock); \
  96. }
/* State backing the driver's debugfs interface. */
struct wcd_spi_debug_data {
	struct dentry *dir;	/* debugfs directory for this device */
	u32 addr;		/* remote address to peek/poke */
	u32 size;		/* transfer size for debug reads */
};
/* Per-device driver state, stored as spi drvdata. */
struct wcd_spi_priv {
	struct spi_device *spi;
	u32 mem_base_addr;	/* base offset added to section addresses */
	struct regmap *regmap;
	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;
	/* Message for two transfers (command + response) */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];
	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;
	/* Clock requests related; clk_mutex guards clk_users/status_mask */
	struct mutex clk_mutex;
	int clk_users;		/* reference count of clock requesters */
	unsigned long status_mask;	/* WCD_SPI_CLK_STATE_ENABLED etc. */
	struct delayed_work clk_dwork;	/* deferred clock-off work */
	/* Transaction related; xfer_mutex serializes data transfers */
	struct mutex xfer_mutex;
	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;
	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;
	/* Completion object to indicate system resume completion */
	struct completion resume_comp;
	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;
	/* DMA handles for transfer buffers */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	/* Handle to child (qmi client) device */
	struct device *ac_dev;
};
  137. enum xfer_request {
  138. WCD_SPI_XFER_WRITE,
  139. WCD_SPI_XFER_READ,
  140. };
  141. static char *wcd_spi_xfer_req_str(enum xfer_request req)
  142. {
  143. if (req == WCD_SPI_XFER_WRITE)
  144. return "xfer_write";
  145. else if (req == WCD_SPI_XFER_READ)
  146. return "xfer_read";
  147. else
  148. return "xfer_invalid";
  149. }
  150. static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
  151. {
  152. xfer->tx_buf = NULL;
  153. xfer->rx_buf = NULL;
  154. xfer->delay_usecs = 0;
  155. xfer->len = 0;
  156. }
  157. static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
  158. {
  159. return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
  160. }
  161. static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
  162. {
  163. struct spi_device *spi = wcd_spi->spi;
  164. if (wcd_spi->clk_users > 0 ||
  165. test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
  166. dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
  167. __func__, wcd_spi->clk_users);
  168. return false;
  169. }
  170. return true;
  171. }
/*
 * wcd_spi_wait_for_resume - wait until the SPI bus has resumed
 * @wcd_spi: driver private data
 *
 * Returns 0 when the bus is (or becomes) resumed, -EIO if resume does
 * not complete within WCD_SPI_RESUME_TIMEOUT_MS.
 *
 * Takes and releases clk_mutex internally; caller must not hold it.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	/*
	 * Drop the lock while waiting so the resume path can take
	 * clk_mutex and signal resume_comp.
	 */
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		/* wait_for_completion_timeout returns 0 on timeout */
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
  198. static int wcd_spi_read_single(struct spi_device *spi,
  199. u32 remote_addr, u32 *val)
  200. {
  201. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  202. struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
  203. struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
  204. u8 *tx_buf = wcd_spi->tx_buf;
  205. u8 *rx_buf = wcd_spi->rx_buf;
  206. u32 frame = 0;
  207. int ret;
  208. dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
  209. __func__, remote_addr);
  210. if (!tx_buf) {
  211. dev_err(&spi->dev, "%s: tx_buf not allocated\n",
  212. __func__);
  213. return -ENOMEM;
  214. }
  215. frame |= WCD_SPI_READ_FRAME_OPCODE;
  216. frame |= remote_addr & WCD_CMD_ADDR_MASK;
  217. wcd_spi_reinit_xfer(tx_xfer);
  218. frame = cpu_to_be32(frame);
  219. memcpy(tx_buf, &frame, sizeof(frame));
  220. tx_xfer->tx_buf = tx_buf;
  221. tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
  222. wcd_spi_reinit_xfer(rx_xfer);
  223. rx_xfer->rx_buf = rx_buf;
  224. rx_xfer->len = sizeof(*val);
  225. ret = spi_sync(spi, &wcd_spi->msg2);
  226. if (ret)
  227. dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
  228. __func__, ret);
  229. else
  230. memcpy((u8*) val, rx_buf, sizeof(*val));
  231. return ret;
  232. }
/*
 * wcd_spi_read_multi - read a block of data using the FREAD command
 * @spi: the spi device
 * @remote_addr: remote (slave) address to read from
 * @data: destination buffer for @len bytes
 * @len: number of bytes to read
 *
 * Performs one full-duplex transfer: the command header goes out while
 * the response (header echo + payload) is clocked in, so the payload
 * starts WCD_SPI_CMD_FREAD_LEN bytes into the rx buffer.
 * Returns 0 on success or a negative error code.
 */
static int wcd_spi_read_multi(struct spi_device *spi,
			      u32 remote_addr, u8 *data,
			      size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	/* Frame: FREAD opcode in the top byte, address in the low 3 */
	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	if (!tx_buf || !rx_buf) {
		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
			(!tx_buf) ? "tx_buf" : "rx_buf");
		return -ENOMEM;
	}

	wcd_spi_reinit_xfer(xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	xfer->tx_buf = tx_buf;
	xfer->rx_buf = rx_buf;
	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret) {
		dev_err(&spi->dev, "%s: failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Payload follows the echoed command header in rx_buf */
	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
done:
	return ret;
}
  268. static int wcd_spi_write_single(struct spi_device *spi,
  269. u32 remote_addr, u32 val)
  270. {
  271. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  272. struct spi_transfer *xfer = &wcd_spi->xfer1;
  273. u8 *tx_buf = wcd_spi->tx_buf;
  274. u32 frame = 0;
  275. dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
  276. __func__, remote_addr, val);
  277. memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
  278. frame |= WCD_SPI_WRITE_FRAME_OPCODE;
  279. frame |= (remote_addr & WCD_CMD_ADDR_MASK);
  280. frame = cpu_to_be32(frame);
  281. memcpy(tx_buf, &frame, sizeof(frame));
  282. memcpy(tx_buf + sizeof(frame), &val, sizeof(val));
  283. wcd_spi_reinit_xfer(xfer);
  284. xfer->tx_buf = tx_buf;
  285. xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
  286. return spi_sync(spi, &wcd_spi->msg1);
  287. }
/*
 * wcd_spi_write_multi - write a block of data with one MIOW transfer
 * @spi: the spi device
 * @remote_addr: remote (slave) address to write to
 * @data: source buffer of @len bytes
 * @len: number of bytes to write (caller bounds this to
 *       WCD_SPI_RW_MULTI_MAX_LEN, which fits the tx buffer)
 *
 * Returns 0 on success or a negative error code.
 */
static int wcd_spi_write_multi(struct spi_device *spi,
			       u32 remote_addr, u8 *data,
			       size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u32 frame = 0;
	u8 *tx_buf = wcd_spi->tx_buf;
	int xfer_len, ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
		__func__, remote_addr, len);

	/* Frame: opcode in the top byte, remote address in the low 3 */
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);
	frame = cpu_to_be32(frame);

	/* Total transfer is the 4-byte command frame plus the payload */
	xfer_len = len + sizeof(frame);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	memcpy(tx_buf, &frame, sizeof(frame));
	memcpy(tx_buf + sizeof(frame), data, len);

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = xfer_len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed, addr = 0x%x, len = %zd\n",
			__func__, remote_addr, len);
	return ret;
}
/*
 * wcd_spi_transfer_split - perform one logical transfer as a series
 * of SPI transactions
 * @spi: the spi device
 * @data_msg: caller's message (remote address, data buffer, length)
 * @xfer_req: WCD_SPI_XFER_WRITE or WCD_SPI_XFER_READ
 *
 * The transfer is split into up to four phases:
 *  1. single-word transfers until the remote address reaches the
 *     multi-transfer alignment (WCD_SPI_RW_MULTI_ALIGN),
 *  2. maximum-sized multi transfers (WCD_SPI_RW_MULTI_MAX_LEN each),
 *  3. one multi transfer covering the largest remaining multiple of
 *     WCD_SPI_RW_MULTI_MIN_LEN,
 *  4. single-word transfers for whatever remains.
 *
 * Caller (__wcd_spi_data_xfer) has already validated that len is a
 * multiple of WCD_SPI_WORD_BYTE_CNT and the address is word aligned.
 * Returns 0 on success or the first failing transfer's error code.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
  415. static int wcd_spi_cmd_nop(struct spi_device *spi)
  416. {
  417. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  418. u8 *tx_buf = wcd_spi->tx_buf;
  419. tx_buf[0] = WCD_SPI_CMD_NOP;
  420. return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
  421. }
  422. static int wcd_spi_cmd_clkreq(struct spi_device *spi)
  423. {
  424. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  425. struct spi_transfer *xfer = &wcd_spi->xfer1;
  426. u8 *tx_buf = wcd_spi->tx_buf;
  427. u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
  428. WCD_SPI_CMD_CLKREQ,
  429. 0xBA, 0x80, 0x00};
  430. memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
  431. wcd_spi_reinit_xfer(xfer);
  432. xfer->tx_buf = tx_buf;
  433. xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
  434. xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;
  435. return spi_sync(spi, &wcd_spi->msg1);
  436. }
  437. static int wcd_spi_cmd_wr_en(struct spi_device *spi)
  438. {
  439. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  440. u8 *tx_buf = wcd_spi->tx_buf;
  441. tx_buf[0] = WCD_SPI_CMD_WREN;
  442. return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
  443. }
/*
 * wcd_spi_cmd_rdsr - read the slave's status register
 * @spi: the spi device
 * @rdsr_status: output for the (CPU-endian) status value
 *
 * Sends the RDSR opcode and reads back a 32-bit big-endian status
 * word in a two-transfer message. Returns 0 on success or a negative
 * error code; *rdsr_status is only written on success.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret;

	tx_buf[0] = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_OPCODE_LEN;

	/* Clear the response area so stale data cannot be mistaken */
	memset(rx_buf, 0, sizeof(*rdsr_status));
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*rdsr_status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Status comes back big-endian on the wire */
	*rdsr_status = be32_to_cpu(*((u32*)rx_buf));

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		 __func__, *rdsr_status);
done:
	return ret;
}
/*
 * wcd_spi_clk_enable - request the slave's SPI clock
 * @spi: the spi device
 *
 * Sequence: acquire SPI access from the access-control child device
 * (if present), then NOP / CLK_REQ / NOP, and finally verify via RDSR
 * that the slave is actually responding. Sets
 * WCD_SPI_CLK_STATE_ENABLED on success.
 *
 * Caller holds clk_mutex (called from wcd_spi_clk_ctrl).
 */
static int wcd_spi_clk_enable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;
	u32 rd_status = 0;

	/* Get the SPI access first */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_REQUEST,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret) {
			dev_err(&spi->dev,
				"%s: Can't get spi access, err = %d\n",
				__func__, ret);
			return ret;
		}
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_clkreq(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
			__func__, ret);
		goto done;
	}
	/*
	 * RDSR's return code is intentionally ignored: rd_status stays
	 * zero on failure and the check below turns that into -EIO.
	 */
	wcd_spi_cmd_rdsr(spi, &rd_status);
	/*
	 * Read status zero means reads are not
	 * happenning on the bus, possibly because
	 * clock request failed.
	 */
	if (rd_status) {
		set_bit(WCD_SPI_CLK_STATE_ENABLED,
			&wcd_spi->status_mask);
	} else {
		dev_err(&spi->dev, "%s: RDSR status is zero\n",
			__func__);
		ret = -EIO;
	}
done:
	return ret;
}
/*
 * wcd_spi_clk_disable - release the slave's SPI clock
 * @spi: the spi device
 *
 * Writes the IPC control word to let the slave drop its clock, clears
 * the clock-state bit regardless of the write result, then releases
 * SPI access via the access-control child device (if present).
 *
 * NOTE(review): if the access release succeeds it overwrites ret, so
 * a failed IPC write can still return 0 — pre-existing behavior.
 *
 * Caller holds clk_mutex.
 */
static int wcd_spi_clk_disable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
			__func__, ret);
	/*
	 * clear this bit even if clock disable failed
	 * as the source clocks might get turned off.
	 */
	clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);

	/* once the clock is released, SPI access can be released as well */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_RELEASE,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret)
			dev_err(&spi->dev,
				"%s: SPI access release failed, err = %d\n",
				__func__, ret);
	}

	return ret;
}
/*
 * wcd_spi_clk_ctrl - reference-counted clock enable/disable
 * @spi: the spi device
 * @request: WCD_SPI_CLK_ENABLE or WCD_SPI_CLK_DISABLE
 * @flags: WCD_SPI_CLK_FLAG_DELAYED defers the actual disable via
 *         clk_dwork; WCD_SPI_CLK_FLAG_IMMEDIATE disables right away
 *
 * The hardware clock is enabled on the first user and disabled when
 * the last user votes it off. Unbalanced disables reset clk_users to
 * zero and return -EINVAL. Returns 0 on success or a negative error.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/*
		 * Cancel the disable clk work. Drop the lock first:
		 * the work handler itself takes clk_mutex, so a
		 * cancel_..._sync under the lock would deadlock.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
/*
 * wcd_spi_init - bring the SPI slave to a usable state
 * @spi: the spi device
 *
 * Enables the clock, sends write-enable, re-syncs the regcache and
 * programs the slave config and maximum MTU. The clock vote is
 * released on both the success and the error path (the success path
 * deliberately falls through the err_wr_en label).
 *
 * Returns 0 on success or a negative error code.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	/* Intentional fall-through: release the clock in every case */
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
  653. static void wcd_spi_clk_work(struct work_struct *work)
  654. {
  655. struct delayed_work *dwork;
  656. struct wcd_spi_priv *wcd_spi;
  657. struct spi_device *spi;
  658. int ret;
  659. dwork = to_delayed_work(work);
  660. wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
  661. spi = wcd_spi->spi;
  662. WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
  663. ret = wcd_spi_clk_disable(spi);
  664. if (ret < 0)
  665. dev_err(&spi->dev,
  666. "%s: Failed to disable clk, err = %d\n",
  667. __func__, ret);
  668. WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
  669. }
/*
 * __wcd_spi_data_xfer - validate and perform one data transfer
 * @spi: the spi device
 * @msg: transfer description (address, buffer, length)
 * @xfer_req: WCD_SPI_XFER_WRITE or WCD_SPI_XFER_READ
 *
 * Validates word alignment of address and length, rejects the
 * transfer while suspended, then dispatches either a single-word
 * transfer or the split path. clk_mutex is held across the transfer
 * to fence against suspend; xfer_mutex serializes the actual bus use.
 *
 * Caller must have voted the clock on. Returns 0 or negative error.
 */
static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s len 0x%zx is not multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (wcd_spi_is_suspended(wcd_spi)) {
		dev_dbg(&spi->dev,
			"%s: SPI suspended, cannot perform transfer\n",
			__func__);
		ret = -EIO;
		goto done;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		/* Exactly one word: skip the split machinery */
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi, msg->remote_addr,
						  (u32 *) msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return ret;
}
  712. static int wcd_spi_data_xfer(struct spi_device *spi,
  713. struct wcd_spi_msg *msg,
  714. enum xfer_request req)
  715. {
  716. int ret, ret1;
  717. if (msg->len <= 0) {
  718. dev_err(&spi->dev, "%s: Invalid size %zd\n",
  719. __func__, msg->len);
  720. return -EINVAL;
  721. }
  722. /* Request for clock */
  723. ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
  724. WCD_SPI_CLK_FLAG_IMMEDIATE);
  725. if (ret < 0) {
  726. dev_err(&spi->dev, "%s: clk enable failed %d\n",
  727. __func__, ret);
  728. goto done;
  729. }
  730. /* Perform the transaction */
  731. ret = __wcd_spi_data_xfer(spi, msg, req);
  732. if (ret < 0)
  733. dev_err(&spi->dev,
  734. "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
  735. __func__, wcd_spi_xfer_req_str(req),
  736. msg->remote_addr, msg->len, ret);
  737. /* Release the clock even if xfer failed */
  738. ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
  739. WCD_SPI_CLK_FLAG_DELAYED);
  740. if (ret1 < 0)
  741. dev_err(&spi->dev, "%s: clk disable failed %d\n",
  742. __func__, ret1);
  743. done:
  744. return ret;
  745. }
  746. /*
  747. * wcd_spi_data_write: Write data to WCD SPI
  748. * @spi: spi_device struct
  749. * @msg: msg that needs to be written to WCD
  750. *
  751. * This API writes length of data to address specified. These details
  752. * about the write are encapsulated in @msg. Write size should be multiple
  753. * of 4 bytes and write address should be 4-byte aligned.
  754. */
  755. static int wcd_spi_data_write(struct spi_device *spi,
  756. struct wcd_spi_msg *msg)
  757. {
  758. if (!spi || !msg) {
  759. pr_err("%s: Invalid %s\n", __func__,
  760. (!spi) ? "spi device" : "msg");
  761. return -EINVAL;
  762. }
  763. dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
  764. __func__, msg->remote_addr, msg->len);
  765. return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
  766. }
  767. /*
  768. * wcd_spi_data_read: Read data from WCD SPI
  769. * @spi: spi_device struct
  770. * @msg: msg that needs to be read from WCD
  771. *
  772. * This API reads length of data from address specified. These details
  773. * about the read are encapsulated in @msg. Read size should be multiple
  774. * of 4 bytes and read address should be 4-byte aligned.
  775. */
  776. static int wcd_spi_data_read(struct spi_device *spi,
  777. struct wcd_spi_msg *msg)
  778. {
  779. if (!spi || !msg) {
  780. pr_err("%s: Invalid %s\n", __func__,
  781. (!spi) ? "spi device" : "msg");
  782. return -EINVAL;
  783. }
  784. dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
  785. __func__, msg->remote_addr, msg->len);
  786. return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
  787. }
/*
 * wdsp_spi_dload_section: download one image section to WDSP memory
 * @spi: spi device to write through
 * @data: opaque pointer, actually a struct wdsp_img_section
 *
 * Translates the section's address to the remote memory map by adding
 * mem_base_addr, then writes the section payload.
 *
 * Note: this calls __wcd_spi_data_xfer() (no per-call clock voting),
 * unlike wdsp_spi_read_section(). The clock is held across the whole
 * download by the WDSP_EVENT_PRE_DLOAD_*/WDSP_EVENT_POST_DLOAD_*
 * handling in wdsp_spi_event_handler().
 */
static int wdsp_spi_dload_section(struct spi_device *spi,
				  void *data)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;
	int ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, sec->addr, sec->size);

	/* Section addresses are relative to the remote memory base */
	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;
	msg.len = sec->size;

	ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
	if (ret < 0)
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
	return ret;
}
/*
 * wdsp_spi_read_section: read one image section back from WDSP memory
 * @spi: spi device to read through
 * @data: opaque pointer, actually a struct wdsp_img_section
 *
 * Translates the section's address to the remote memory map and reads
 * the section contents into sec->data. Uses the clocked transfer path
 * (wcd_spi_data_xfer), so it votes for/against the clock itself.
 */
static int wdsp_spi_read_section(struct spi_device *spi, void *data)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;
	int ret;

	/* Section addresses are relative to the remote memory base */
	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;
	msg.len = sec->size;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, msg.remote_addr, msg.len);

	ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
	if (ret < 0)
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
	return ret;
}
/*
 * wdsp_spi_event_handler: event callback invoked by the WDSP framework
 * @dev: this SPI device (as a generic struct device)
 * @priv_data: component private data (unused here)
 * @event: which lifecycle event is being signalled
 * @data: event-specific payload (section pointer, ops struct, ...)
 *
 * Dispatches WDSP lifecycle events: access-control around shutdown,
 * clock voting around image download, section read/write, and
 * suspend/resume gating. Returns 0 or a negative errno.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_PRE_SHUTDOWN:
		/* Claim SPI access before the remote side goes down */
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_REQUEST,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: request access failed %d\n",
					__func__, ret);
		}
		break;
	case WDSP_EVENT_POST_SHUTDOWN:
		/*
		 * Cancel any pending delayed clock-disable, then force the
		 * clock off and drop all user votes under clk_mutex.
		 */
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;
	case WDSP_EVENT_POST_BOOTUP:
		/* Remote is up again; release the access claim */
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_RELEASE,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: release access failed %d\n",
					__func__, ret);
		}
		break;
	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		/*
		 * Vote the clock on for the whole download; the
		 * per-section writes use the raw (unclocked) xfer path.
		 */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;
	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:
		/* Remove the download clock vote (also on failure) */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;
	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;
	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;
	case WDSP_EVENT_SUSPEND:
		/* Refuse suspend while clock users/activity remain */
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;
	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;
	case WDSP_EVENT_GET_DEVOPS:
		/* Hand our read/write entry points back to the caller */
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}
		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;
	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
/*
 * wcd_spi_bus_gwrite: regmap gather-write callback
 * @context: struct device of the SPI device
 * @reg: register address bytes (must be reg_bytes long)
 * @reg_len: length of @reg
 * @val: value bytes (must be val_bytes long)
 * @val_len: length of @val
 *
 * Builds an IRW (internal register write) command in the DMA-safe
 * tx_buf: opcode, register address, then the value, and sends it as a
 * single spi_write. Returns 0 or a negative errno.
 */
static int wcd_spi_bus_gwrite(void *context, const void *reg,
			      size_t reg_len, const void *val,
			      size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 *tx_buf = wcd_spi->tx_buf;

	/* Lengths must exactly match the regmap geometry */
	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRW;
	/*
	 * Single-byte address copy: reg_bytes is derived from 8-bit
	 * reg_bits in wcd_spi_component_bind(), so reg_len == 1 here.
	 */
	tx_buf[1] = *((u8 *)reg);
	memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
	       val, val_len);

	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
}
  937. static int wcd_spi_bus_write(void *context, const void *data,
  938. size_t count)
  939. {
  940. struct device *dev = context;
  941. struct spi_device *spi = to_spi_device(dev);
  942. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  943. if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
  944. dev_err(&spi->dev, "%s: Invalid size %zd\n",
  945. __func__, count);
  946. WARN_ON(1);
  947. return -EINVAL;
  948. }
  949. return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
  950. data + wcd_spi->reg_bytes,
  951. count - wcd_spi->reg_bytes);
  952. }
/*
 * wcd_spi_bus_read: regmap read callback
 * @context: struct device of the SPI device
 * @reg: register address bytes (must be reg_bytes long)
 * @reg_len: length of @reg
 * @val: destination for the value bytes
 * @val_len: expected value length (must be val_bytes)
 *
 * Issues an IRR (internal register read) command as a two-transfer
 * SPI message (msg2): first transfer sends the command from tx_buf,
 * second clocks the value into the DMA-safe rx_buf, which is then
 * copied to @val. Returns 0 or a negative errno.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret = 0;

	/* Lengths must exactly match the regmap geometry */
	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	/* Command phase: opcode + single-byte register address */
	memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	/* Data phase: clock val_len bytes into rx_buf */
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = val_len;

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret) {
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
		goto done;
	}

	memcpy(val, rx_buf, val_len);

done:
	return ret;
}
/*
 * Custom regmap bus: register I/O goes through the WCD SPI command
 * protocol (IRW/IRR) instead of plain spi_write/spi_read.
 * Values are big-endian on the wire; register format stays native.
 */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
  1000. static int wcd_spi_state_show(struct seq_file *f, void *ptr)
  1001. {
  1002. struct spi_device *spi = f->private;
  1003. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1004. const char *clk_state, *clk_mutex, *xfer_mutex;
  1005. if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
  1006. clk_state = "enabled";
  1007. else
  1008. clk_state = "disabled";
  1009. clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
  1010. "locked" : "unlocked";
  1011. xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
  1012. "locked" : "unlocked";
  1013. seq_printf(f, "clk_state = %s\nclk_users = %d\n"
  1014. "clk_mutex = %s\nxfer_mutex = %s\n",
  1015. clk_state, wcd_spi->clk_users, clk_mutex,
  1016. xfer_mutex);
  1017. return 0;
  1018. }
/* Open hook for the debugfs "state" file: bind the seq_file renderer. */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
/* File operations for the read-only debugfs "state" file. */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * wcd_spi_debugfs_mem_read: debugfs "mem_read" file read handler
 *
 * Reads up to min(count, debugfs "size") bytes from the remote address
 * configured via the debugfs "addr" file and copies them to userspace.
 * Returns the number of bytes copied, 0 if addr/size are unset, or a
 * negative errno on failure.
 */
static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;
	char *buf;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* addr/size must be configured through debugfs first */
	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		dev_err(&spi->dev,
			"%s: Invalid request, size = %u, addr = 0x%x\n",
			__func__, dbg_data->size, dbg_data->addr);
		return 0;
	}

	/* Clamp the read to the configured window */
	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.data = buf;
	msg.remote_addr = dbg_data->addr;
	msg.len = buf_size;
	msg.flags = 0;

	ret = wcd_spi_data_read(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev,
			"%s: Failed to read %zu bytes from addr 0x%x\n",
			__func__, buf_size, msg.remote_addr);
		goto done;
	}

	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);

done:
	kfree(buf);
	/* On read failure propagate the error, otherwise the byte count */
	if (ret < 0)
		return ret;
	else
		return read_count;
}
/* File operations for the read-only debugfs "mem_read" file. */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
  1074. static int wcd_spi_debugfs_init(struct spi_device *spi)
  1075. {
  1076. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1077. struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
  1078. int rc = 0;
  1079. dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
  1080. if (IS_ERR_OR_NULL(dbg_data->dir)) {
  1081. dbg_data->dir = NULL;
  1082. rc = -ENODEV;
  1083. goto done;
  1084. }
  1085. debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
  1086. debugfs_create_u32("addr", 0644, dbg_data->dir,
  1087. &dbg_data->addr);
  1088. debugfs_create_u32("size", 0644, dbg_data->dir,
  1089. &dbg_data->size);
  1090. debugfs_create_file("mem_read", 0444, dbg_data->dir,
  1091. spi, &mem_read_fops);
  1092. done:
  1093. return rc;
  1094. }
/*
 * Power-on default values for the WCD SPI slave registers, used to
 * seed the regmap cache.
 */
static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};
/*
 * wcd_spi_is_volatile_reg: regmap volatile_reg callback
 *
 * Registers whose value is changed by the hardware (status, IRQ,
 * counters) must bypass the cache; all others are cacheable.
 */
static bool wcd_spi_is_volatile_reg(struct device *dev,
				    unsigned int reg)
{
	switch (reg) {
	case WCD_SPI_SLAVE_SANITY:
	case WCD_SPI_SLAVE_STATUS:
	case WCD_SPI_SLAVE_IRQ_STATUS:
	case WCD_SPI_SLAVE_TX:
	case WCD_SPI_SLAVE_SW_RST_IRQ:
	case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
	case WCD_SPI_SLAVE_FIFO_LEVEL:
	case WCD_SPI_SLAVE_GENERICS:
		return true;
	}

	return false;
}
/*
 * wcd_spi_is_readable_reg: regmap readable_reg callback
 *
 * Write-only control registers (reset, IRQ clear/force) are excluded
 * from reads; everything else is readable.
 */
static bool wcd_spi_is_readable_reg(struct device *dev,
				    unsigned int reg)
{
	switch (reg) {
	case WCD_SPI_SLAVE_SW_RESET:
	case WCD_SPI_SLAVE_IRQ_CLR:
	case WCD_SPI_SLAVE_IRQ_FORCE:
		return false;
	}

	return true;
}
/*
 * Regmap geometry: 8-bit register addresses, 32-bit values, rbtree
 * cache seeded from wcd_spi_defaults. reg_bytes/val_bytes in the
 * private data are derived from reg_bits/val_bits at bind time.
 */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
  1156. static int wcd_spi_add_ac_dev(struct device *dev,
  1157. struct device_node *node)
  1158. {
  1159. struct spi_device *spi = to_spi_device(dev);
  1160. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1161. struct platform_device *pdev;
  1162. int ret = 0;
  1163. pdev = platform_device_alloc("wcd-spi-ac", -1);
  1164. if (IS_ERR_OR_NULL(pdev)) {
  1165. ret = PTR_ERR(pdev);
  1166. dev_err(dev, "%s: pdev alloc failed, ret = %d\n",
  1167. __func__, ret);
  1168. return ret;
  1169. }
  1170. pdev->dev.parent = dev;
  1171. pdev->dev.of_node = node;
  1172. ret = platform_device_add(pdev);
  1173. if (ret) {
  1174. dev_err(dev, "%s: pdev add failed, ret = %d\n",
  1175. __func__, ret);
  1176. goto dealloc_pdev;
  1177. }
  1178. wcd_spi->ac_dev = &pdev->dev;
  1179. return 0;
  1180. dealloc_pdev:
  1181. platform_device_put(pdev);
  1182. return ret;
  1183. }
  1184. static int wdsp_spi_init(struct device *dev, void *priv_data)
  1185. {
  1186. struct spi_device *spi = to_spi_device(dev);
  1187. int ret;
  1188. struct device_node *node;
  1189. for_each_child_of_node(dev->of_node, node) {
  1190. if (!strcmp(node->name, "wcd_spi_ac"))
  1191. wcd_spi_add_ac_dev(dev, node);
  1192. }
  1193. ret = wcd_spi_init(spi);
  1194. if (ret < 0)
  1195. dev_err(&spi->dev, "%s: Init failed, err = %d\n",
  1196. __func__, ret);
  1197. return ret;
  1198. }
/*
 * wdsp_spi_deinit: WDSP component deinit callback
 *
 * Always returns 0.
 */
static int wdsp_spi_deinit(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/*
	 * Deinit means the hardware is reset. Mark the cache
	 * as dirty here, so init will sync the cache
	 */
	regcache_mark_dirty(wcd_spi->regmap);

	return 0;
}
/* Component callbacks registered with the WDSP framework master. */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
/*
 * wcd_spi_component_bind: component-framework bind callback
 * @dev: this SPI device
 * @master: the aggregate master device
 * @data: master's ops (struct with register_cmpnt_ops, suspend, ...)
 *
 * Registers this component with the master, sets up the regmap over
 * the custom SPI bus, creates debugfs entries, prepares the one- and
 * two-transfer SPI messages, and pre-allocates DMA-coherent transfer
 * buffers. Returns 0 or a negative errno.
 */
static int wcd_spi_component_bind(struct device *dev,
				  struct device *master,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;

	wcd_spi->m_dev = master;
	wcd_spi->m_ops = data;

	if (wcd_spi->m_ops &&
	    wcd_spi->m_ops->register_cmpnt_ops)
		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
							 wcd_spi,
							 &wdsp_spi_ops);
	if (ret) {
		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Byte widths derived from the regmap config (8-bit reg, 32-bit val) */
	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);

	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
					   &spi->dev, &wcd_spi_regmap_cfg);
	if (IS_ERR(wcd_spi->regmap)) {
		ret = PTR_ERR(wcd_spi->regmap);
		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Debugfs is diagnostics only; failure is logged, not fatal */
	if (wcd_spi_debugfs_init(spi))
		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);

	/* msg1: single transfer; msg2: command + data transfer pair */
	spi_message_init(&wcd_spi->msg1);
	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);

	spi_message_init(&wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);

	/* Pre-allocate the buffers */
	/*
	 * NOTE(review): dma_zalloc_coherent() was removed from mainline
	 * in v5.0 (dma_alloc_coherent now zeroes); keep as-is for the
	 * kernel this tree targets.
	 */
	wcd_spi->tx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->tx_dma, GFP_KERNEL);
	if (!wcd_spi->tx_buf) {
		ret = -ENOMEM;
		goto done;
	}

	wcd_spi->rx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->rx_dma, GFP_KERNEL);
	if (!wcd_spi->rx_buf) {
		/* Unwind the tx buffer so bind failure leaks nothing */
		dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
				  wcd_spi->tx_buf, wcd_spi->tx_dma);
		wcd_spi->tx_buf = NULL;
		ret = -ENOMEM;
		goto done;
	}

done:
	return ret;
}
/*
 * wcd_spi_component_unbind: component-framework unbind callback
 *
 * Reverses wcd_spi_component_bind(): removes debugfs, clears master
 * references, detaches the transfers from their messages and frees
 * the DMA-coherent buffers. The regmap is devm-managed and is not
 * released here.
 */
static void wcd_spi_component_unbind(struct device *dev,
				     struct device *master,
				     void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;

	debugfs_remove_recursive(dbg_data->dir);
	dbg_data->dir = NULL;

	wcd_spi->m_dev = NULL;
	wcd_spi->m_ops = NULL;

	/* Detach transfers added via spi_message_add_tail() at bind */
	spi_transfer_del(&wcd_spi->xfer1);
	spi_transfer_del(&wcd_spi->xfer2[0]);
	spi_transfer_del(&wcd_spi->xfer2[1]);

	dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
			  wcd_spi->tx_buf, wcd_spi->tx_dma);
	dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
			  wcd_spi->rx_buf, wcd_spi->rx_dma);
	wcd_spi->tx_buf = NULL;
	wcd_spi->rx_buf = NULL;
}
/* Component bind/unbind hooks registered with the aggregate master. */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
/*
 * wcd_spi_probe: SPI driver probe
 *
 * Allocates the private data, reads the mandatory remote memory base
 * address from DT, initializes locks/work/completion, and registers
 * with the component framework. Returns 0 or a negative errno.
 */
static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	/* Remote memory base is mandatory; sections are offset from it */
	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);
	arch_setup_dma_ops(&spi->dev, 0, 0, NULL, true);

	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

	/*
	 * err_ret is reached before the mutexes are initialized, so it
	 * intentionally skips mutex_destroy().
	 */
err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}
  1337. static int wcd_spi_remove(struct spi_device *spi)
  1338. {
  1339. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1340. component_del(&spi->dev, &wcd_spi_component_ops);
  1341. mutex_destroy(&wcd_spi->clk_mutex);
  1342. mutex_destroy(&wcd_spi->xfer_mutex);
  1343. devm_kfree(&spi->dev, wcd_spi);
  1344. spi_set_drvdata(spi, NULL);
  1345. return 0;
  1346. }
  1347. #ifdef CONFIG_PM
/*
 * wcd_spi_suspend: system-suspend callback
 *
 * Refuses suspend (-EBUSY) while the driver is busy; otherwise asks
 * the WDSP master to suspend the remaining components and, on
 * success, marks this driver as suspended. Returns 0 or -EBUSY.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * Drop clk_mutex around the master callback to avoid
		 * holding it across foreign code, then re-take it before
		 * touching status_mask.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
/*
 * wcd_spi_resume: system-resume callback
 *
 * Clears the suspended flag under clk_mutex and wakes any waiter
 * blocked in wcd_spi_wait_for_resume(). Always returns 0.
 */
static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}
/* PM callbacks; compiled only when CONFIG_PM is enabled. */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
  1393. #endif
/* Device tree match table for this driver. */
static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
/* SPI driver registration and module metadata. */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");