wcd-spi.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569
  1. /*
  2. * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/init.h>
  14. #include <linux/module.h>
  15. #include <linux/of.h>
  16. #include <linux/debugfs.h>
  17. #include <linux/delay.h>
  18. #include <linux/bitops.h>
  19. #include <linux/spi/spi.h>
  20. #include <linux/regmap.h>
  21. #include <linux/component.h>
  22. #include <linux/ratelimit.h>
  23. #include <sound/wcd-dsp-mgr.h>
  24. #include <sound/wcd-spi.h>
  25. #include "wcd-spi-registers.h"
  26. /* Byte manipulations */
  27. #define SHIFT_1_BYTES (8)
  28. #define SHIFT_2_BYTES (16)
  29. #define SHIFT_3_BYTES (24)
  30. /* Command opcodes */
  31. #define WCD_SPI_CMD_NOP (0x00)
  32. #define WCD_SPI_CMD_WREN (0x06)
  33. #define WCD_SPI_CMD_CLKREQ (0xDA)
  34. #define WCD_SPI_CMD_RDSR (0x05)
  35. #define WCD_SPI_CMD_IRR (0x81)
  36. #define WCD_SPI_CMD_IRW (0x82)
  37. #define WCD_SPI_CMD_MIOR (0x83)
  38. #define WCD_SPI_CMD_FREAD (0x0B)
  39. #define WCD_SPI_CMD_MIOW (0x02)
  40. #define WCD_SPI_WRITE_FRAME_OPCODE \
  41. (WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
  42. #define WCD_SPI_READ_FRAME_OPCODE \
  43. (WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
  44. #define WCD_SPI_FREAD_FRAME_OPCODE \
  45. (WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)
  46. /* Command lengths */
  47. #define WCD_SPI_OPCODE_LEN (0x01)
  48. #define WCD_SPI_CMD_NOP_LEN (0x01)
  49. #define WCD_SPI_CMD_WREN_LEN (0x01)
  50. #define WCD_SPI_CMD_CLKREQ_LEN (0x04)
  51. #define WCD_SPI_CMD_IRR_LEN (0x04)
  52. #define WCD_SPI_CMD_IRW_LEN (0x06)
  53. #define WCD_SPI_WRITE_SINGLE_LEN (0x08)
  54. #define WCD_SPI_READ_SINGLE_LEN (0x13)
  55. #define WCD_SPI_CMD_FREAD_LEN (0x13)
  56. /* Command delays */
  57. #define WCD_SPI_CLKREQ_DELAY_USECS (500)
  58. #define WCD_SPI_CLK_OFF_TIMER_MS (500)
  59. #define WCD_SPI_RESUME_TIMEOUT_MS 100
  60. /* Command masks */
  61. #define WCD_CMD_ADDR_MASK \
  62. (0xFF | \
  63. (0xFF << SHIFT_1_BYTES) | \
  64. (0xFF << SHIFT_2_BYTES))
  65. /* Clock ctrl request related */
  66. #define WCD_SPI_CLK_ENABLE true
  67. #define WCD_SPI_CLK_DISABLE false
  68. #define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
  69. #define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)
  70. /* Internal addresses */
  71. #define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)
  72. /* Word sizes and min/max lengths */
  73. #define WCD_SPI_WORD_BYTE_CNT (4)
  74. #define WCD_SPI_RW_MULTI_MIN_LEN (16)
  75. /* Max size is 32 bytes less than 64Kbytes */
  76. #define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
  77. /*
  78. * Max size for the pre-allocated buffers is the max
  79. * possible read/write length + 32 bytes for the SPI
  80. * read/write command header itself.
  81. */
  82. #define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
  83. /* Alignment requirements */
  84. #define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
  85. #define WCD_SPI_RW_MULTI_ALIGN (16)
  86. /* Status mask bits */
  87. #define WCD_SPI_CLK_STATE_ENABLED BIT(0)
  88. #define WCD_SPI_IS_SUSPENDED BIT(1)
  89. /* Locking related */
  90. #define WCD_SPI_MUTEX_LOCK(spi, lock) \
  91. { \
  92. dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n", \
  93. __func__, __stringify_1(lock)); \
  94. mutex_lock(&lock); \
  95. }
  96. #define WCD_SPI_MUTEX_UNLOCK(spi, lock) \
  97. { \
  98. dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n", \
  99. __func__, __stringify_1(lock)); \
  100. mutex_unlock(&lock); \
  101. }
/*
 * Debugfs state for one device: the debugfs directory handle plus an
 * address and size (presumably set/used by debugfs hooks that are not
 * visible in this chunk — confirm against the debugfs init code).
 */
struct wcd_spi_debug_data {
	struct dentry *dir;
	u32 addr;
	u32 size;
};
/* Per-device driver state, stored as the spi_device drvdata */
struct wcd_spi_priv {
	struct spi_device *spi;
	u32 mem_base_addr;	/* added to section addresses on image download */
	struct regmap *regmap;
	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;
	/* Message for two transfers (command out + data in) */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];
	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;
	/* Clock requests related */
	struct mutex clk_mutex;		/* guards clk_users and status_mask updates */
	int clk_users;			/* clock vote refcount; 0 => clock may be disabled */
	unsigned long status_mask;	/* WCD_SPI_CLK_STATE_ENABLED / WCD_SPI_IS_SUSPENDED bits */
	struct delayed_work clk_dwork;	/* deferred clock-off work (wcd_spi_clk_work) */
	/* Transaction related */
	struct mutex xfer_mutex;	/* serializes data transfers */
	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;
	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;
	/* Completion object to indicate system resume completion */
	struct completion resume_comp;
	/* Pre-allocated buffers used for all SPI transfers */
	void *tx_buf;
	void *rx_buf;
};
/* Direction of a data transfer request */
enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};
  141. static char *wcd_spi_xfer_req_str(enum xfer_request req)
  142. {
  143. if (req == WCD_SPI_XFER_WRITE)
  144. return "xfer_write";
  145. else if (req == WCD_SPI_XFER_READ)
  146. return "xfer_read";
  147. else
  148. return "xfer_invalid";
  149. }
  150. static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
  151. {
  152. xfer->tx_buf = NULL;
  153. xfer->rx_buf = NULL;
  154. xfer->delay_usecs = 0;
  155. xfer->len = 0;
  156. }
  157. static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
  158. {
  159. return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
  160. }
  161. static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
  162. {
  163. struct spi_device *spi = wcd_spi->spi;
  164. if (wcd_spi->clk_users > 0 ||
  165. test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
  166. dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
  167. __func__, wcd_spi->clk_users);
  168. return false;
  169. }
  170. return true;
  171. }
/*
 * Block until system resume completes. Returns 0 if the device is
 * already resumed or the resume completion fires within
 * WCD_SPI_RESUME_TIMEOUT_MS; -EIO on timeout.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	/* Drop the lock while sleeping so the resume path can complete us */
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	/* rc is the remaining jiffies count; 0 means the wait timed out */
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
  198. static int wcd_spi_read_single(struct spi_device *spi,
  199. u32 remote_addr, u32 *val)
  200. {
  201. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  202. struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
  203. struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
  204. u8 *tx_buf = wcd_spi->tx_buf;
  205. u8 *rx_buf = wcd_spi->rx_buf;
  206. u32 frame = 0;
  207. int ret;
  208. dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
  209. __func__, remote_addr);
  210. if (!tx_buf) {
  211. dev_err(&spi->dev, "%s: tx_buf not allocated\n",
  212. __func__);
  213. return -ENOMEM;
  214. }
  215. frame |= WCD_SPI_READ_FRAME_OPCODE;
  216. frame |= remote_addr & WCD_CMD_ADDR_MASK;
  217. wcd_spi_reinit_xfer(tx_xfer);
  218. frame = cpu_to_be32(frame);
  219. memcpy(tx_buf, &frame, sizeof(frame));
  220. tx_xfer->tx_buf = tx_buf;
  221. tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
  222. wcd_spi_reinit_xfer(rx_xfer);
  223. rx_xfer->rx_buf = rx_buf;
  224. rx_xfer->len = sizeof(*val);
  225. ret = spi_sync(spi, &wcd_spi->msg2);
  226. if (ret)
  227. dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
  228. __func__, ret);
  229. else
  230. memcpy((u8*) val, rx_buf, sizeof(*val));
  231. return ret;
  232. }
  233. static int wcd_spi_read_multi(struct spi_device *spi,
  234. u32 remote_addr, u8 *data,
  235. size_t len)
  236. {
  237. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  238. struct spi_transfer *xfer = &wcd_spi->xfer1;
  239. u8 *tx_buf = wcd_spi->tx_buf;
  240. u8 *rx_buf = wcd_spi->rx_buf;
  241. u32 frame = 0;
  242. int ret;
  243. dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
  244. __func__, remote_addr, len);
  245. frame |= WCD_SPI_FREAD_FRAME_OPCODE;
  246. frame |= remote_addr & WCD_CMD_ADDR_MASK;
  247. if (!tx_buf || !rx_buf) {
  248. dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
  249. (!tx_buf) ? "tx_buf" : "rx_buf");
  250. return -ENOMEM;
  251. }
  252. wcd_spi_reinit_xfer(xfer);
  253. frame = cpu_to_be32(frame);
  254. memcpy(tx_buf, &frame, sizeof(frame));
  255. xfer->tx_buf = tx_buf;
  256. xfer->rx_buf = rx_buf;
  257. xfer->len = WCD_SPI_CMD_FREAD_LEN + len;
  258. ret = spi_sync(spi, &wcd_spi->msg1);
  259. if (ret) {
  260. dev_err(&spi->dev, "%s: failed, err = %d\n",
  261. __func__, ret);
  262. goto done;
  263. }
  264. memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
  265. done:
  266. return ret;
  267. }
  268. static int wcd_spi_write_single(struct spi_device *spi,
  269. u32 remote_addr, u32 val)
  270. {
  271. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  272. struct spi_transfer *xfer = &wcd_spi->xfer1;
  273. u8 *tx_buf = wcd_spi->tx_buf;
  274. u32 frame = 0;
  275. dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
  276. __func__, remote_addr, val);
  277. memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
  278. frame |= WCD_SPI_WRITE_FRAME_OPCODE;
  279. frame |= (remote_addr & WCD_CMD_ADDR_MASK);
  280. frame = cpu_to_be32(frame);
  281. memcpy(tx_buf, &frame, sizeof(frame));
  282. memcpy(tx_buf + sizeof(frame), &val, sizeof(val));
  283. wcd_spi_reinit_xfer(xfer);
  284. xfer->tx_buf = tx_buf;
  285. xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
  286. return spi_sync(spi, &wcd_spi->msg1);
  287. }
  288. static int wcd_spi_write_multi(struct spi_device *spi,
  289. u32 remote_addr, u8 *data,
  290. size_t len)
  291. {
  292. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  293. struct spi_transfer *xfer = &wcd_spi->xfer1;
  294. u32 frame = 0;
  295. u8 *tx_buf = wcd_spi->tx_buf;
  296. int xfer_len, ret;
  297. dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
  298. __func__, remote_addr, len);
  299. frame |= WCD_SPI_WRITE_FRAME_OPCODE;
  300. frame |= (remote_addr & WCD_CMD_ADDR_MASK);
  301. frame = cpu_to_be32(frame);
  302. xfer_len = len + sizeof(frame);
  303. if (!tx_buf) {
  304. dev_err(&spi->dev, "%s: tx_buf not allocated\n",
  305. __func__);
  306. return -ENOMEM;
  307. }
  308. memcpy(tx_buf, &frame, sizeof(frame));
  309. memcpy(tx_buf + sizeof(frame), data, len);
  310. wcd_spi_reinit_xfer(xfer);
  311. xfer->tx_buf = tx_buf;
  312. xfer->len = xfer_len;
  313. ret = spi_sync(spi, &wcd_spi->msg1);
  314. if (ret < 0)
  315. dev_err(&spi->dev,
  316. "%s: Failed, addr = 0x%x, len = %zd\n",
  317. __func__, remote_addr, len);
  318. return ret;
  319. }
/*
 * wcd_spi_transfer_split: perform one logical read/write by splitting
 * it into hardware-friendly chunks:
 *   1) single-word transfers until the address is 16-byte aligned,
 *   2) maximum-sized (WCD_SPI_RW_MULTI_MAX_LEN) multi transfers,
 *   3) one multi transfer for the largest remaining multiple of
 *      WCD_SPI_RW_MULTI_MIN_LEN,
 *   4) single-word transfers for whatever remains.
 * Returns 0 on success or the first failing helper's error code.
 * Caller (__wcd_spi_data_xfer) has already validated 4-byte alignment
 * of the address and that len is a multiple of 4.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}
		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}
		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}
		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}
		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
  415. static int wcd_spi_cmd_nop(struct spi_device *spi)
  416. {
  417. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  418. u8 *tx_buf = wcd_spi->tx_buf;
  419. tx_buf[0] = WCD_SPI_CMD_NOP;
  420. return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
  421. }
  422. static int wcd_spi_cmd_clkreq(struct spi_device *spi)
  423. {
  424. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  425. struct spi_transfer *xfer = &wcd_spi->xfer1;
  426. u8 *tx_buf = wcd_spi->tx_buf;
  427. u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
  428. WCD_SPI_CMD_CLKREQ,
  429. 0xBA, 0x80, 0x00};
  430. memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
  431. wcd_spi_reinit_xfer(xfer);
  432. xfer->tx_buf = tx_buf;
  433. xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
  434. xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;
  435. return spi_sync(spi, &wcd_spi->msg1);
  436. }
  437. static int wcd_spi_cmd_wr_en(struct spi_device *spi)
  438. {
  439. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  440. u8 *tx_buf = wcd_spi->tx_buf;
  441. tx_buf[0] = WCD_SPI_CMD_WREN;
  442. return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
  443. }
/*
 * wcd_spi_cmd_rdsr: issue the Read-Status-Register command and return
 * the 32-bit status word (converted from wire big-endian) in
 * @rdsr_status. Returns 0 on success or a negative errno.
 *
 * NOTE(review): tx_buf/rx_buf are used unchecked here, unlike the
 * read/write helpers — presumably probe guarantees allocation; confirm.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret;

	/* Transfer 1: one-byte RDSR opcode out */
	tx_buf[0] = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_OPCODE_LEN;

	/* Transfer 2: 4-byte status word in */
	memset(rx_buf, 0, sizeof(*rdsr_status));
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*rdsr_status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Status comes back big-endian on the wire */
	*rdsr_status = be32_to_cpu(*((u32*)rx_buf));

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		 __func__, *rdsr_status);
done:
	return ret;
}
/*
 * wcd_spi_clk_enable: request the slave clock with the sequence
 * NOP -> CLK_REQ -> NOP, then verify with RDSR that the slave is
 * actually responding. Sets WCD_SPI_CLK_STATE_ENABLED on success.
 * Returns 0 on success, -EIO if the slave returned an all-zero status,
 * or the failing command's error code.
 */
static int wcd_spi_clk_enable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;
	u32 rd_status = 0;

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_clkreq(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/*
	 * Return value deliberately ignored: on failure rd_status stays 0
	 * and the zero-status check below reports the error.
	 */
	wcd_spi_cmd_rdsr(spi, &rd_status);
	/*
	 * Read status zero means reads are not
	 * happenning on the bus, possibly because
	 * clock request failed.
	 */
	if (rd_status) {
		set_bit(WCD_SPI_CLK_STATE_ENABLED,
			&wcd_spi->status_mask);
	} else {
		dev_err(&spi->dev, "%s: RDSR status is zero\n",
			__func__);
		ret = -EIO;
	}
done:
	return ret;
}
  513. static int wcd_spi_clk_disable(struct spi_device *spi)
  514. {
  515. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  516. int ret;
  517. ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
  518. if (ret < 0)
  519. dev_err(&spi->dev, "%s: Failed, err = %d\n",
  520. __func__, ret);
  521. /*
  522. * clear this bit even if clock disable failed
  523. * as the source clocks might get turned off.
  524. */
  525. clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);
  526. return ret;
  527. }
/*
 * wcd_spi_clk_ctrl: refcounted clock voting.
 * @request: WCD_SPI_CLK_ENABLE to add a vote, WCD_SPI_CLK_DISABLE to drop one.
 * @flags: for disable only — WCD_SPI_CLK_FLAG_DELAYED defers the actual
 *         clock-off by WCD_SPI_CLK_OFF_TIMER_MS via clk_dwork;
 *         WCD_SPI_CLK_FLAG_IMMEDIATE disables right away.
 * The clock is physically enabled on the first vote and disabled when
 * the last vote is dropped. Returns 0 or a negative errno.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	/* Only meaningful for disable requests; used in the exit log */
	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			 __func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/*
		 * Cancel the disable clk work. The mutex is dropped around
		 * the _sync cancel because the work item itself takes
		 * clk_mutex (wcd_spi_clk_work) — holding it would deadlock.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
/*
 * wcd_spi_init: initialize the SPI slave — vote for the clock, send
 * write-enable, resync the regcache and program the slave config and
 * max transfer length. The clock vote is always released before
 * returning; note the deliberate fall-through into err_wr_en on the
 * success path.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	/* Runs on both success and failure: drop the clock vote taken above */
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
  631. static void wcd_spi_clk_work(struct work_struct *work)
  632. {
  633. struct delayed_work *dwork;
  634. struct wcd_spi_priv *wcd_spi;
  635. struct spi_device *spi;
  636. int ret;
  637. dwork = to_delayed_work(work);
  638. wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
  639. spi = wcd_spi->spi;
  640. WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
  641. ret = wcd_spi_clk_disable(spi);
  642. if (ret < 0)
  643. dev_err(&spi->dev,
  644. "%s: Failed to disable clk, err = %d\n",
  645. __func__, ret);
  646. WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
  647. }
  648. static int __wcd_spi_data_xfer(struct spi_device *spi,
  649. struct wcd_spi_msg *msg,
  650. enum xfer_request xfer_req)
  651. {
  652. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  653. int ret;
  654. /* Check for minimum alignment requirements */
  655. if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
  656. dev_err(&spi->dev,
  657. "%s addr 0x%x is not aligned to 0x%x\n",
  658. __func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
  659. return -EINVAL;
  660. } else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
  661. dev_err(&spi->dev,
  662. "%s len 0x%zx is not multiple of %d\n",
  663. __func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
  664. return -EINVAL;
  665. }
  666. WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
  667. if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
  668. if (xfer_req == WCD_SPI_XFER_WRITE)
  669. ret = wcd_spi_write_single(spi, msg->remote_addr,
  670. (*((u32 *)msg->data)));
  671. else
  672. ret = wcd_spi_read_single(spi, msg->remote_addr,
  673. (u32 *) msg->data);
  674. } else {
  675. ret = wcd_spi_transfer_split(spi, msg, xfer_req);
  676. }
  677. WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);
  678. return ret;
  679. }
  680. static int wcd_spi_data_xfer(struct spi_device *spi,
  681. struct wcd_spi_msg *msg,
  682. enum xfer_request req)
  683. {
  684. int ret, ret1;
  685. if (msg->len <= 0) {
  686. dev_err(&spi->dev, "%s: Invalid size %zd\n",
  687. __func__, msg->len);
  688. return -EINVAL;
  689. }
  690. /* Request for clock */
  691. ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
  692. WCD_SPI_CLK_FLAG_IMMEDIATE);
  693. if (ret < 0) {
  694. dev_err(&spi->dev, "%s: clk enable failed %d\n",
  695. __func__, ret);
  696. goto done;
  697. }
  698. /* Perform the transaction */
  699. ret = __wcd_spi_data_xfer(spi, msg, req);
  700. if (ret < 0)
  701. dev_err(&spi->dev,
  702. "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
  703. __func__, wcd_spi_xfer_req_str(req),
  704. msg->remote_addr, msg->len, ret);
  705. /* Release the clock even if xfer failed */
  706. ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
  707. WCD_SPI_CLK_FLAG_DELAYED);
  708. if (ret1 < 0)
  709. dev_err(&spi->dev, "%s: clk disable failed %d\n",
  710. __func__, ret1);
  711. done:
  712. return ret;
  713. }
  714. /*
  715. * wcd_spi_data_write: Write data to WCD SPI
  716. * @spi: spi_device struct
  717. * @msg: msg that needs to be written to WCD
  718. *
  719. * This API writes length of data to address specified. These details
  720. * about the write are encapsulated in @msg. Write size should be multiple
  721. * of 4 bytes and write address should be 4-byte aligned.
  722. */
  723. static int wcd_spi_data_write(struct spi_device *spi,
  724. struct wcd_spi_msg *msg)
  725. {
  726. if (!spi || !msg) {
  727. pr_err("%s: Invalid %s\n", __func__,
  728. (!spi) ? "spi device" : "msg");
  729. return -EINVAL;
  730. }
  731. dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
  732. __func__, msg->remote_addr, msg->len);
  733. return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
  734. }
  735. /*
  736. * wcd_spi_data_read: Read data from WCD SPI
  737. * @spi: spi_device struct
  738. * @msg: msg that needs to be read from WCD
  739. *
  740. * This API reads length of data from address specified. These details
  741. * about the read are encapsulated in @msg. Read size should be multiple
  742. * of 4 bytes and read address should be 4-byte aligned.
  743. */
  744. static int wcd_spi_data_read(struct spi_device *spi,
  745. struct wcd_spi_msg *msg)
  746. {
  747. if (!spi || !msg) {
  748. pr_err("%s: Invalid %s\n", __func__,
  749. (!spi) ? "spi device" : "msg");
  750. return -EINVAL;
  751. }
  752. dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
  753. __func__, msg->remote_addr, msg->len);
  754. return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
  755. }
  756. static int wdsp_spi_dload_section(struct spi_device *spi,
  757. void *data)
  758. {
  759. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  760. struct wdsp_img_section *sec = data;
  761. struct wcd_spi_msg msg;
  762. int ret;
  763. dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
  764. __func__, sec->addr, sec->size);
  765. msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
  766. msg.data = sec->data;
  767. msg.len = sec->size;
  768. ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
  769. if (ret < 0)
  770. dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
  771. __func__, msg.remote_addr, msg.len);
  772. return ret;
  773. }
  774. static int wdsp_spi_read_section(struct spi_device *spi, void *data)
  775. {
  776. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  777. struct wdsp_img_section *sec = data;
  778. struct wcd_spi_msg msg;
  779. int ret;
  780. msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
  781. msg.data = sec->data;
  782. msg.len = sec->size;
  783. dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
  784. __func__, msg.remote_addr, msg.len);
  785. ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
  786. if (ret < 0)
  787. dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
  788. __func__, msg.remote_addr, msg.len);
  789. return ret;
  790. }
/*
 * Event callback invoked by the WDSP master component framework.
 * Handles clock voting around image download, suspend/resume gating
 * and hands out this device's SPI read/write ops on request.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_POST_SHUTDOWN:
		/*
		 * DSP went down: flush any pending deferred clock-disable
		 * work, force the clock off if enabled and reset the
		 * vote count under the clock mutex.
		 */
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		/* Vote for the SPI clock before image download begins */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:
		/* Drop the clock vote taken for the download */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	case WDSP_EVENT_SUSPEND:
		/* Veto suspend while SPI clock users are still active */
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	case WDSP_EVENT_GET_DEVOPS:
		/*
		 * Caller requests our device ops; @data is an out-param
		 * pointing at a struct wcd_spi_ops to fill in.
		 */
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}
		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
  861. static int wcd_spi_bus_gwrite(void *context, const void *reg,
  862. size_t reg_len, const void *val,
  863. size_t val_len)
  864. {
  865. struct device *dev = context;
  866. struct spi_device *spi = to_spi_device(dev);
  867. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  868. u8 *tx_buf = wcd_spi->tx_buf;
  869. if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
  870. val_len != wcd_spi->val_bytes) {
  871. dev_err(&spi->dev,
  872. "%s: Invalid input, reg_len = %zd, val_len = %zd",
  873. __func__, reg_len, val_len);
  874. return -EINVAL;
  875. }
  876. memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
  877. tx_buf[0] = WCD_SPI_CMD_IRW;
  878. tx_buf[1] = *((u8 *)reg);
  879. memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
  880. val, val_len);
  881. return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
  882. }
  883. static int wcd_spi_bus_write(void *context, const void *data,
  884. size_t count)
  885. {
  886. struct device *dev = context;
  887. struct spi_device *spi = to_spi_device(dev);
  888. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  889. if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
  890. dev_err(&spi->dev, "%s: Invalid size %zd\n",
  891. __func__, count);
  892. WARN_ON(1);
  893. return -EINVAL;
  894. }
  895. return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
  896. data + wcd_spi->reg_bytes,
  897. count - wcd_spi->reg_bytes);
  898. }
/*
 * Regmap bus read callback: clocks out an internal register read (IRR)
 * command and clocks the register value back in a second transfer.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret = 0;

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	/* Build the IRR command: opcode followed by the register address */
	memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	/* First transfer sends the command ... */
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	/* ... second transfer receives the register value */
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = val_len;

	/* msg2 chains xfer2[0] and xfer2[1]; it was built at bind time */
	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret) {
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
		goto done;
	}

	memcpy(val, rx_buf, val_len);

done:
	return ret;
}
/* Custom regmap bus backed by the SPI command protocol above */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
  946. static int wcd_spi_state_show(struct seq_file *f, void *ptr)
  947. {
  948. struct spi_device *spi = f->private;
  949. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  950. const char *clk_state, *clk_mutex, *xfer_mutex;
  951. if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
  952. clk_state = "enabled";
  953. else
  954. clk_state = "disabled";
  955. clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
  956. "locked" : "unlocked";
  957. xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
  958. "locked" : "unlocked";
  959. seq_printf(f, "clk_state = %s\nclk_users = %d\n"
  960. "clk_mutex = %s\nxfer_mutex = %s\n",
  961. clk_state, wcd_spi->clk_users, clk_mutex,
  962. xfer_mutex);
  963. return 0;
  964. }
/* debugfs open hook: bind the seq_file show routine to this inode */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
/* File operations for the read-only debugfs "state" entry */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * debugfs "mem_read" handler: reads dbg_data->size bytes starting at
 * dbg_data->addr (both set via the sibling "addr"/"size" debugfs u32
 * entries) from the remote device and copies them to userspace.
 */
static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;
	char *buf;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Both address and size must have been configured first */
	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		dev_err(&spi->dev,
			"%s: Invalid request, size = %u, addr = 0x%x\n",
			__func__, dbg_data->size, dbg_data->addr);
		return 0;
	}

	/* Clamp to the smaller of user buffer and configured read size */
	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.data = buf;
	msg.remote_addr = dbg_data->addr;
	msg.len = buf_size;
	msg.flags = 0;

	/*
	 * NOTE(review): the device is re-read on every call; *ppos only
	 * offsets into this freshly-read buffer, not the remote address.
	 */
	ret = wcd_spi_data_read(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev,
			"%s: Failed to read %zu bytes from addr 0x%x\n",
			__func__, buf_size, msg.remote_addr);
		goto done;
	}

	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);

done:
	kfree(buf);
	if (ret < 0)
		return ret;
	else
		return read_count;
}
/* File operations for the read-only debugfs "mem_read" entry */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
  1020. static int wcd_spi_debugfs_init(struct spi_device *spi)
  1021. {
  1022. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1023. struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
  1024. int rc = 0;
  1025. dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
  1026. if (IS_ERR_OR_NULL(dbg_data->dir)) {
  1027. dbg_data->dir = NULL;
  1028. rc = -ENODEV;
  1029. goto done;
  1030. }
  1031. debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
  1032. debugfs_create_u32("addr", 0644, dbg_data->dir,
  1033. &dbg_data->addr);
  1034. debugfs_create_u32("size", 0644, dbg_data->dir,
  1035. &dbg_data->size);
  1036. debugfs_create_file("mem_read", 0444, dbg_data->dir,
  1037. spi, &mem_read_fops);
  1038. done:
  1039. return rc;
  1040. }
  1041. static const struct reg_default wcd_spi_defaults[] = {
  1042. {WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
  1043. {WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
  1044. {WCD_SPI_SLAVE_STATUS, 0x80100000},
  1045. {WCD_SPI_SLAVE_CONFIG, 0x0F200808},
  1046. {WCD_SPI_SLAVE_SW_RESET, 0x00000000},
  1047. {WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
  1048. {WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
  1049. {WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
  1050. {WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
  1051. {WCD_SPI_SLAVE_TX, 0x00000000},
  1052. {WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
  1053. {WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
  1054. {WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
  1055. {WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
  1056. {WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
  1057. {WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
  1058. {WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
  1059. {WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
  1060. {WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
  1061. {WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
  1062. {WCD_SPI_SLAVE_GENERICS, 0x80000000},
  1063. {WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
  1064. };
  1065. static bool wcd_spi_is_volatile_reg(struct device *dev,
  1066. unsigned int reg)
  1067. {
  1068. switch (reg) {
  1069. case WCD_SPI_SLAVE_SANITY:
  1070. case WCD_SPI_SLAVE_STATUS:
  1071. case WCD_SPI_SLAVE_IRQ_STATUS:
  1072. case WCD_SPI_SLAVE_TX:
  1073. case WCD_SPI_SLAVE_SW_RST_IRQ:
  1074. case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
  1075. case WCD_SPI_SLAVE_FIFO_LEVEL:
  1076. case WCD_SPI_SLAVE_GENERICS:
  1077. return true;
  1078. }
  1079. return false;
  1080. }
  1081. static bool wcd_spi_is_readable_reg(struct device *dev,
  1082. unsigned int reg)
  1083. {
  1084. switch (reg) {
  1085. case WCD_SPI_SLAVE_SW_RESET:
  1086. case WCD_SPI_SLAVE_IRQ_CLR:
  1087. case WCD_SPI_SLAVE_IRQ_FORCE:
  1088. return false;
  1089. }
  1090. return true;
  1091. }
/* Regmap configuration: 8-bit register addresses, 32-bit values */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
  1102. static int wdsp_spi_init(struct device *dev, void *priv_data)
  1103. {
  1104. struct spi_device *spi = to_spi_device(dev);
  1105. int ret;
  1106. ret = wcd_spi_init(spi);
  1107. if (ret < 0)
  1108. dev_err(&spi->dev, "%s: Init failed, err = %d\n",
  1109. __func__, ret);
  1110. return ret;
  1111. }
  1112. static int wdsp_spi_deinit(struct device *dev, void *priv_data)
  1113. {
  1114. struct spi_device *spi = to_spi_device(dev);
  1115. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1116. /*
  1117. * Deinit means the hardware is reset. Mark the cache
  1118. * as dirty here, so init will sync the cache
  1119. */
  1120. regcache_mark_dirty(wcd_spi->regmap);
  1121. return 0;
  1122. }
/* Component ops registered with the WDSP master framework */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
  1128. static int wcd_spi_component_bind(struct device *dev,
  1129. struct device *master,
  1130. void *data)
  1131. {
  1132. struct spi_device *spi = to_spi_device(dev);
  1133. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1134. int ret = 0;
  1135. wcd_spi->m_dev = master;
  1136. wcd_spi->m_ops = data;
  1137. if (wcd_spi->m_ops &&
  1138. wcd_spi->m_ops->register_cmpnt_ops)
  1139. ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
  1140. wcd_spi,
  1141. &wdsp_spi_ops);
  1142. if (ret) {
  1143. dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
  1144. __func__, ret);
  1145. goto done;
  1146. }
  1147. wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
  1148. wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
  1149. wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
  1150. &spi->dev, &wcd_spi_regmap_cfg);
  1151. if (IS_ERR(wcd_spi->regmap)) {
  1152. ret = PTR_ERR(wcd_spi->regmap);
  1153. dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
  1154. __func__, ret);
  1155. goto done;
  1156. }
  1157. if (wcd_spi_debugfs_init(spi))
  1158. dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
  1159. spi_message_init(&wcd_spi->msg1);
  1160. spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
  1161. spi_message_init(&wcd_spi->msg2);
  1162. spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
  1163. spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
  1164. /* Pre-allocate the buffers */
  1165. wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
  1166. GFP_KERNEL | GFP_DMA);
  1167. if (!wcd_spi->tx_buf) {
  1168. ret = -ENOMEM;
  1169. goto done;
  1170. }
  1171. wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
  1172. GFP_KERNEL | GFP_DMA);
  1173. if (!wcd_spi->rx_buf) {
  1174. kfree(wcd_spi->tx_buf);
  1175. wcd_spi->tx_buf = NULL;
  1176. ret = -ENOMEM;
  1177. goto done;
  1178. }
  1179. done:
  1180. return ret;
  1181. }
  1182. static void wcd_spi_component_unbind(struct device *dev,
  1183. struct device *master,
  1184. void *data)
  1185. {
  1186. struct spi_device *spi = to_spi_device(dev);
  1187. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1188. struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
  1189. debugfs_remove_recursive(dbg_data->dir);
  1190. dbg_data->dir = NULL;
  1191. wcd_spi->m_dev = NULL;
  1192. wcd_spi->m_ops = NULL;
  1193. spi_transfer_del(&wcd_spi->xfer1);
  1194. spi_transfer_del(&wcd_spi->xfer2[0]);
  1195. spi_transfer_del(&wcd_spi->xfer2[1]);
  1196. kfree(wcd_spi->tx_buf);
  1197. kfree(wcd_spi->rx_buf);
  1198. wcd_spi->tx_buf = NULL;
  1199. wcd_spi->rx_buf = NULL;
  1200. }
/* Component framework callbacks for this device */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
/*
 * SPI probe: allocate driver state, read the DSP memory base address
 * from devicetree, set up locks/work and register with the component
 * framework (the heavy lifting happens later in component bind).
 */
static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	/* mem-base-addr is mandatory: section addresses are relative to it */
	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);

	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}
/* SPI remove: undo probe in reverse order */
static int wcd_spi_remove(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	component_del(&spi->dev, &wcd_spi_component_ops);

	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);

	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);

	return 0;
}
  1254. #ifdef CONFIG_PM
/*
 * System suspend handler: refuse suspend while SPI clock users are
 * active, otherwise notify the WDSP master and mark ourselves
 * suspended on success.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * Drop the clock mutex across the master callback to
		 * avoid holding it while other components run, then
		 * reacquire it before touching status_mask.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
/*
 * System resume handler: clear the suspended flag and wake any
 * waiters blocked in wcd_spi_wait_for_resume().
 */
static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}
/* PM callbacks, only compiled in when CONFIG_PM is set */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
#endif
  1301. static const struct of_device_id wcd_spi_of_match[] = {
  1302. { .compatible = "qcom,wcd-spi-v2", },
  1303. { }
  1304. };
  1305. MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
/* SPI driver registration */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");