wcd-spi.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/of.h>
  8. #include <linux/debugfs.h>
  9. #include <linux/delay.h>
  10. #include <linux/bitops.h>
  11. #include <linux/spi/spi.h>
  12. #include <linux/regmap.h>
  13. #include <linux/component.h>
  14. #include <linux/ratelimit.h>
  15. #include <sound/wcd-dsp-mgr.h>
  16. #include <sound/wcd-spi.h>
  17. #include "wcd-spi-registers.h"
/* Byte manipulations */
#define SHIFT_1_BYTES (8)
#define SHIFT_2_BYTES (16)
#define SHIFT_3_BYTES (24)

/*
 * Command opcodes: each opcode occupies the most-significant byte of a
 * 32-bit command frame (see the *_FRAME_OPCODE macros below).
 */
#define WCD_SPI_CMD_NOP (0x00)
#define WCD_SPI_CMD_WREN (0x06)
#define WCD_SPI_CMD_CLKREQ (0xDA)
#define WCD_SPI_CMD_RDSR (0x05)
#define WCD_SPI_CMD_IRR (0x81)
#define WCD_SPI_CMD_IRW (0x82)
#define WCD_SPI_CMD_MIOR (0x83)
#define WCD_SPI_CMD_FREAD (0x0B)
#define WCD_SPI_CMD_MIOW (0x02)
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)

/* Command lengths: total bytes clocked on the wire for each command */
#define WCD_SPI_OPCODE_LEN (0x01)
#define WCD_SPI_CMD_NOP_LEN (0x01)
#define WCD_SPI_CMD_WREN_LEN (0x01)
#define WCD_SPI_CMD_CLKREQ_LEN (0x04)
#define WCD_SPI_CMD_IRR_LEN (0x04)
#define WCD_SPI_CMD_IRW_LEN (0x06)
#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
#define WCD_SPI_READ_SINGLE_LEN (0x13)
#define WCD_SPI_CMD_FREAD_LEN (0x13)

/* Command delays */
#define WCD_SPI_CLKREQ_DELAY_USECS (500)
#define WCD_SPI_CLK_OFF_TIMER_MS (500)
#define WCD_SPI_RESUME_TIMEOUT_MS 100

/* Command masks */
/* The low 24 bits of a command frame carry the remote address */
#define WCD_CMD_ADDR_MASK \
	(0xFF | \
	 (0xFF << SHIFT_1_BYTES) | \
	 (0xFF << SHIFT_2_BYTES))

/* Clock ctrl request related */
#define WCD_SPI_CLK_ENABLE true
#define WCD_SPI_CLK_DISABLE false
#define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
#define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)

/* Internal addresses */
#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)

/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT (4)
#define WCD_SPI_RW_MULTI_MIN_LEN (16)

/* Max size is 32 bytes less than 64Kbytes */
#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)

/*
 * Max size for the pre-allocated buffers is the max
 * possible read/write length + 32 bytes for the SPI
 * read/write command header itself.
 */
#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)

/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
#define WCD_SPI_RW_MULTI_ALIGN (16)

/* Status mask bits, used with wcd_spi_priv::status_mask */
#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
#define WCD_SPI_IS_SUSPENDED BIT(1)
  81. /* Locking related */
  82. #define WCD_SPI_MUTEX_LOCK(spi, lock) \
  83. { \
  84. dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n", \
  85. __func__, __stringify_1(lock)); \
  86. mutex_lock(&lock); \
  87. }
  88. #define WCD_SPI_MUTEX_UNLOCK(spi, lock) \
  89. { \
  90. dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n", \
  91. __func__, __stringify_1(lock)); \
  92. mutex_unlock(&lock); \
  93. }
/* State backing the driver's debugfs interface. */
struct wcd_spi_debug_data {
	struct dentry *dir;	/* debugfs directory for this device */
	u32 addr;		/* presumably the remote address for debug access — confirm against debugfs handlers */
	u32 size;		/* presumably the byte count for debug access — confirm against debugfs handlers */
};
/* Per-device private state, stored via spi_set_drvdata(). */
struct wcd_spi_priv {
	struct spi_device *spi;
	u32 mem_base_addr;	/* base added to section addresses for downloads */
	struct regmap *regmap;
	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;
	/* Message for two transfers (e.g. cmd-out then data-in) */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];
	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;
	/* Clock requests related */
	struct mutex clk_mutex;	/* guards clk_users and status_mask updates */
	int clk_users;		/* reference count of outstanding clock votes */
	unsigned long status_mask;	/* WCD_SPI_CLK_STATE_ENABLED / WCD_SPI_IS_SUSPENDED bits */
	struct delayed_work clk_dwork;	/* deferred clock-off work */
	/* Transaction related */
	struct mutex xfer_mutex;	/* serializes data transfers */
	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;
	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;
	/* Completion object to indicate system resume completion */
	struct completion resume_comp;
	/* Buffers to hold memory used for transfers */
	void *tx_buf;	/* pre-allocated, WCD_SPI_RW_MAX_BUF_SIZE bytes assumed — see users */
	void *rx_buf;
};
  129. enum xfer_request {
  130. WCD_SPI_XFER_WRITE,
  131. WCD_SPI_XFER_READ,
  132. };
  133. static char *wcd_spi_xfer_req_str(enum xfer_request req)
  134. {
  135. if (req == WCD_SPI_XFER_WRITE)
  136. return "xfer_write";
  137. else if (req == WCD_SPI_XFER_READ)
  138. return "xfer_read";
  139. else
  140. return "xfer_invalid";
  141. }
  142. static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
  143. {
  144. xfer->tx_buf = NULL;
  145. xfer->rx_buf = NULL;
  146. xfer->delay_usecs = 0;
  147. xfer->len = 0;
  148. }
  149. static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
  150. {
  151. return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
  152. }
  153. static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
  154. {
  155. struct spi_device *spi = wcd_spi->spi;
  156. if (wcd_spi->clk_users > 0 ||
  157. test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
  158. dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
  159. __func__, wcd_spi->clk_users);
  160. return false;
  161. }
  162. return true;
  163. }
/*
 * Block until system resume completes, or time out.
 *
 * Returns 0 when the device is (or becomes) resumed, -EIO if resume
 * does not complete within WCD_SPI_RESUME_TIMEOUT_MS.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	/* Drop the lock while waiting so the resume path can take it
	 * and signal resume_comp; re-take it before touching state. */
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
  190. static int wcd_spi_read_single(struct spi_device *spi,
  191. u32 remote_addr, u32 *val)
  192. {
  193. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  194. struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
  195. struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
  196. u8 *tx_buf = wcd_spi->tx_buf;
  197. u8 *rx_buf = wcd_spi->rx_buf;
  198. u32 frame = 0;
  199. int ret;
  200. dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
  201. __func__, remote_addr);
  202. if (!tx_buf) {
  203. dev_err(&spi->dev, "%s: tx_buf not allocated\n",
  204. __func__);
  205. return -ENOMEM;
  206. }
  207. frame |= WCD_SPI_READ_FRAME_OPCODE;
  208. frame |= remote_addr & WCD_CMD_ADDR_MASK;
  209. wcd_spi_reinit_xfer(tx_xfer);
  210. frame = cpu_to_be32(frame);
  211. memcpy(tx_buf, &frame, sizeof(frame));
  212. tx_xfer->tx_buf = tx_buf;
  213. tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
  214. wcd_spi_reinit_xfer(rx_xfer);
  215. rx_xfer->rx_buf = rx_buf;
  216. rx_xfer->len = sizeof(*val);
  217. ret = spi_sync(spi, &wcd_spi->msg2);
  218. if (ret)
  219. dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
  220. __func__, ret);
  221. else
  222. memcpy((u8*) val, rx_buf, sizeof(*val));
  223. return ret;
  224. }
  225. static int wcd_spi_read_multi(struct spi_device *spi,
  226. u32 remote_addr, u8 *data,
  227. size_t len)
  228. {
  229. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  230. struct spi_transfer *xfer = &wcd_spi->xfer1;
  231. u8 *tx_buf = wcd_spi->tx_buf;
  232. u8 *rx_buf = wcd_spi->rx_buf;
  233. u32 frame = 0;
  234. int ret;
  235. dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
  236. __func__, remote_addr, len);
  237. frame |= WCD_SPI_FREAD_FRAME_OPCODE;
  238. frame |= remote_addr & WCD_CMD_ADDR_MASK;
  239. if (!tx_buf || !rx_buf) {
  240. dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
  241. (!tx_buf) ? "tx_buf" : "rx_buf");
  242. return -ENOMEM;
  243. }
  244. wcd_spi_reinit_xfer(xfer);
  245. frame = cpu_to_be32(frame);
  246. memcpy(tx_buf, &frame, sizeof(frame));
  247. xfer->tx_buf = tx_buf;
  248. xfer->rx_buf = rx_buf;
  249. xfer->len = WCD_SPI_CMD_FREAD_LEN + len;
  250. ret = spi_sync(spi, &wcd_spi->msg1);
  251. if (ret) {
  252. dev_err(&spi->dev, "%s: failed, err = %d\n",
  253. __func__, ret);
  254. goto done;
  255. }
  256. memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
  257. done:
  258. return ret;
  259. }
  260. static int wcd_spi_write_single(struct spi_device *spi,
  261. u32 remote_addr, u32 val)
  262. {
  263. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  264. struct spi_transfer *xfer = &wcd_spi->xfer1;
  265. u8 *tx_buf = wcd_spi->tx_buf;
  266. u32 frame = 0;
  267. dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
  268. __func__, remote_addr, val);
  269. memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
  270. frame |= WCD_SPI_WRITE_FRAME_OPCODE;
  271. frame |= (remote_addr & WCD_CMD_ADDR_MASK);
  272. frame = cpu_to_be32(frame);
  273. memcpy(tx_buf, &frame, sizeof(frame));
  274. memcpy(tx_buf + sizeof(frame), &val, sizeof(val));
  275. wcd_spi_reinit_xfer(xfer);
  276. xfer->tx_buf = tx_buf;
  277. xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
  278. return spi_sync(spi, &wcd_spi->msg1);
  279. }
  280. static int wcd_spi_write_multi(struct spi_device *spi,
  281. u32 remote_addr, u8 *data,
  282. size_t len)
  283. {
  284. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  285. struct spi_transfer *xfer = &wcd_spi->xfer1;
  286. u32 frame = 0;
  287. u8 *tx_buf = wcd_spi->tx_buf;
  288. int xfer_len, ret;
  289. dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
  290. __func__, remote_addr, len);
  291. frame |= WCD_SPI_WRITE_FRAME_OPCODE;
  292. frame |= (remote_addr & WCD_CMD_ADDR_MASK);
  293. frame = cpu_to_be32(frame);
  294. xfer_len = len + sizeof(frame);
  295. if (!tx_buf) {
  296. dev_err(&spi->dev, "%s: tx_buf not allocated\n",
  297. __func__);
  298. return -ENOMEM;
  299. }
  300. memcpy(tx_buf, &frame, sizeof(frame));
  301. memcpy(tx_buf + sizeof(frame), data, len);
  302. wcd_spi_reinit_xfer(xfer);
  303. xfer->tx_buf = tx_buf;
  304. xfer->len = xfer_len;
  305. ret = spi_sync(spi, &wcd_spi->msg1);
  306. if (ret < 0)
  307. dev_err(&spi->dev,
  308. "%s: Failed, addr = 0x%x, len = %zd\n",
  309. __func__, remote_addr, len);
  310. return ret;
  311. }
/*
 * Split an arbitrary (word-aligned, word-multiple) transfer into an
 * efficient sequence of SPI commands, in four phases:
 *   1. single-word ops until the address reaches 16-byte alignment,
 *   2. maximum-size multi ops while >= WCD_SPI_RW_MULTI_MAX_LEN remains,
 *   3. one multi op for the largest remaining multiple of
 *      WCD_SPI_RW_MULTI_MIN_LEN,
 *   4. single-word ops for whatever is left.
 *
 * Returns 0 on success, or the first failing sub-transfer's errno.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}
done:
	return ret;
}
  407. static int wcd_spi_cmd_nop(struct spi_device *spi)
  408. {
  409. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  410. u8 *tx_buf = wcd_spi->tx_buf;
  411. tx_buf[0] = WCD_SPI_CMD_NOP;
  412. return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
  413. }
  414. static int wcd_spi_cmd_clkreq(struct spi_device *spi)
  415. {
  416. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  417. struct spi_transfer *xfer = &wcd_spi->xfer1;
  418. u8 *tx_buf = wcd_spi->tx_buf;
  419. u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
  420. WCD_SPI_CMD_CLKREQ,
  421. 0xBA, 0x80, 0x00};
  422. memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
  423. wcd_spi_reinit_xfer(xfer);
  424. xfer->tx_buf = tx_buf;
  425. xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
  426. xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;
  427. return spi_sync(spi, &wcd_spi->msg1);
  428. }
  429. static int wcd_spi_cmd_wr_en(struct spi_device *spi)
  430. {
  431. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  432. u8 *tx_buf = wcd_spi->tx_buf;
  433. tx_buf[0] = WCD_SPI_CMD_WREN;
  434. return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
  435. }
  436. static int wcd_spi_cmd_rdsr(struct spi_device *spi,
  437. u32 *rdsr_status)
  438. {
  439. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  440. struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
  441. struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
  442. u8 *tx_buf = wcd_spi->tx_buf;
  443. u8 *rx_buf = wcd_spi->rx_buf;
  444. int ret;
  445. tx_buf[0] = WCD_SPI_CMD_RDSR;
  446. wcd_spi_reinit_xfer(tx_xfer);
  447. tx_xfer->tx_buf = tx_buf;
  448. tx_xfer->len = WCD_SPI_OPCODE_LEN;
  449. memset(rx_buf, 0, sizeof(*rdsr_status));
  450. wcd_spi_reinit_xfer(rx_xfer);
  451. rx_xfer->rx_buf = rx_buf;
  452. rx_xfer->len = sizeof(*rdsr_status);
  453. ret = spi_sync(spi, &wcd_spi->msg2);
  454. if (ret < 0) {
  455. dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
  456. __func__, ret);
  457. goto done;
  458. }
  459. *rdsr_status = be32_to_cpu(*((u32*)rx_buf));
  460. dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
  461. __func__, *rdsr_status);
  462. done:
  463. return ret;
  464. }
  465. static int wcd_spi_clk_enable(struct spi_device *spi)
  466. {
  467. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  468. int ret;
  469. u32 rd_status = 0;
  470. ret = wcd_spi_cmd_nop(spi);
  471. if (ret < 0) {
  472. dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
  473. __func__, ret);
  474. goto done;
  475. }
  476. ret = wcd_spi_cmd_clkreq(spi);
  477. if (ret < 0) {
  478. dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
  479. __func__, ret);
  480. goto done;
  481. }
  482. ret = wcd_spi_cmd_nop(spi);
  483. if (ret < 0) {
  484. dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
  485. __func__, ret);
  486. goto done;
  487. }
  488. wcd_spi_cmd_rdsr(spi, &rd_status);
  489. /*
  490. * Read status zero means reads are not
  491. * happenning on the bus, possibly because
  492. * clock request failed.
  493. */
  494. if (rd_status) {
  495. set_bit(WCD_SPI_CLK_STATE_ENABLED,
  496. &wcd_spi->status_mask);
  497. } else {
  498. dev_err(&spi->dev, "%s: RDSR status is zero\n",
  499. __func__);
  500. ret = -EIO;
  501. }
  502. done:
  503. return ret;
  504. }
  505. static int wcd_spi_clk_disable(struct spi_device *spi)
  506. {
  507. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  508. int ret;
  509. ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
  510. if (ret < 0)
  511. dev_err(&spi->dev, "%s: Failed, err = %d\n",
  512. __func__, ret);
  513. /*
  514. * clear this bit even if clock disable failed
  515. * as the source clocks might get turned off.
  516. */
  517. clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);
  518. return ret;
  519. }
/*
 * Reference-counted clock vote/unvote.
 *
 * @request: WCD_SPI_CLK_ENABLE to vote, WCD_SPI_CLK_DISABLE to unvote.
 * @flags:   for disable, WCD_SPI_CLK_FLAG_DELAYED defers the actual
 *           clock-off by WCD_SPI_CLK_OFF_TIMER_MS via clk_dwork;
 *           WCD_SPI_CLK_FLAG_IMMEDIATE disables right away.
 *
 * Returns 0 on success or a negative errno.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/* Cancel the disable clk work */
		/*
		 * Drop the mutex across the cancel: clk_dwork takes
		 * clk_mutex itself, so cancelling synchronously while
		 * holding it would deadlock. Re-take it afterwards.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
/*
 * Initialize the SPI slave: vote for the clock, send write-enable,
 * sync the register cache and program slave config/MTU, then release
 * the clock vote.
 *
 * Returns 0 on success or a negative errno.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);

	/*
	 * NOTE: the success path intentionally falls through to the
	 * label below — the clock vote is released on both success
	 * and failure; ret still carries the earlier status.
	 */
err_wr_en:
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
  623. static void wcd_spi_clk_work(struct work_struct *work)
  624. {
  625. struct delayed_work *dwork;
  626. struct wcd_spi_priv *wcd_spi;
  627. struct spi_device *spi;
  628. int ret;
  629. dwork = to_delayed_work(work);
  630. wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
  631. spi = wcd_spi->spi;
  632. WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
  633. ret = wcd_spi_clk_disable(spi);
  634. if (ret < 0)
  635. dev_err(&spi->dev,
  636. "%s: Failed to disable clk, err = %d\n",
  637. __func__, ret);
  638. WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
  639. }
  640. static int __wcd_spi_data_xfer(struct spi_device *spi,
  641. struct wcd_spi_msg *msg,
  642. enum xfer_request xfer_req)
  643. {
  644. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  645. int ret;
  646. /* Check for minimum alignment requirements */
  647. if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
  648. dev_err(&spi->dev,
  649. "%s addr 0x%x is not aligned to 0x%x\n",
  650. __func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
  651. return -EINVAL;
  652. } else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
  653. dev_err(&spi->dev,
  654. "%s len 0x%zx is not multiple of %d\n",
  655. __func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
  656. return -EINVAL;
  657. }
  658. WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
  659. if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
  660. if (xfer_req == WCD_SPI_XFER_WRITE)
  661. ret = wcd_spi_write_single(spi, msg->remote_addr,
  662. (*((u32 *)msg->data)));
  663. else
  664. ret = wcd_spi_read_single(spi, msg->remote_addr,
  665. (u32 *) msg->data);
  666. } else {
  667. ret = wcd_spi_transfer_split(spi, msg, xfer_req);
  668. }
  669. WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);
  670. return ret;
  671. }
  672. static int wcd_spi_data_xfer(struct spi_device *spi,
  673. struct wcd_spi_msg *msg,
  674. enum xfer_request req)
  675. {
  676. int ret, ret1;
  677. if (msg->len <= 0) {
  678. dev_err(&spi->dev, "%s: Invalid size %zd\n",
  679. __func__, msg->len);
  680. return -EINVAL;
  681. }
  682. /* Request for clock */
  683. ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
  684. WCD_SPI_CLK_FLAG_IMMEDIATE);
  685. if (ret < 0) {
  686. dev_err(&spi->dev, "%s: clk enable failed %d\n",
  687. __func__, ret);
  688. goto done;
  689. }
  690. /* Perform the transaction */
  691. ret = __wcd_spi_data_xfer(spi, msg, req);
  692. if (ret < 0)
  693. dev_err(&spi->dev,
  694. "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
  695. __func__, wcd_spi_xfer_req_str(req),
  696. msg->remote_addr, msg->len, ret);
  697. /* Release the clock even if xfer failed */
  698. ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
  699. WCD_SPI_CLK_FLAG_DELAYED);
  700. if (ret1 < 0)
  701. dev_err(&spi->dev, "%s: clk disable failed %d\n",
  702. __func__, ret1);
  703. done:
  704. return ret;
  705. }
  706. /*
  707. * wcd_spi_data_write: Write data to WCD SPI
  708. * @spi: spi_device struct
  709. * @msg: msg that needs to be written to WCD
  710. *
  711. * This API writes length of data to address specified. These details
  712. * about the write are encapsulated in @msg. Write size should be multiple
  713. * of 4 bytes and write address should be 4-byte aligned.
  714. */
  715. static int wcd_spi_data_write(struct spi_device *spi,
  716. struct wcd_spi_msg *msg)
  717. {
  718. if (!spi || !msg) {
  719. pr_err("%s: Invalid %s\n", __func__,
  720. (!spi) ? "spi device" : "msg");
  721. return -EINVAL;
  722. }
  723. dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
  724. __func__, msg->remote_addr, msg->len);
  725. return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
  726. }
  727. /*
  728. * wcd_spi_data_read: Read data from WCD SPI
  729. * @spi: spi_device struct
  730. * @msg: msg that needs to be read from WCD
  731. *
  732. * This API reads length of data from address specified. These details
  733. * about the read are encapsulated in @msg. Read size should be multiple
  734. * of 4 bytes and read address should be 4-byte aligned.
  735. */
  736. static int wcd_spi_data_read(struct spi_device *spi,
  737. struct wcd_spi_msg *msg)
  738. {
  739. if (!spi || !msg) {
  740. pr_err("%s: Invalid %s\n", __func__,
  741. (!spi) ? "spi device" : "msg");
  742. return -EINVAL;
  743. }
  744. dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
  745. __func__, msg->remote_addr, msg->len);
  746. return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
  747. }
  748. static int wdsp_spi_dload_section(struct spi_device *spi,
  749. void *data)
  750. {
  751. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  752. struct wdsp_img_section *sec = data;
  753. struct wcd_spi_msg msg;
  754. int ret;
  755. dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
  756. __func__, sec->addr, sec->size);
  757. msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
  758. msg.data = sec->data;
  759. msg.len = sec->size;
  760. ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
  761. if (ret < 0)
  762. dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
  763. __func__, msg.remote_addr, msg.len);
  764. return ret;
  765. }
  766. static int wdsp_spi_read_section(struct spi_device *spi, void *data)
  767. {
  768. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  769. struct wdsp_img_section *sec = data;
  770. struct wcd_spi_msg msg;
  771. int ret;
  772. msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
  773. msg.data = sec->data;
  774. msg.len = sec->size;
  775. dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
  776. __func__, msg.remote_addr, msg.len);
  777. ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
  778. if (ret < 0)
  779. dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
  780. __func__, msg.remote_addr, msg.len);
  781. return ret;
  782. }
/*
 * wdsp_spi_event_handler: Dispatch WDSP framework events to SPI actions
 * @dev: the SPI device this component is bound to
 * @priv_data: component private data (unused here)
 * @event: which WDSP lifecycle event occurred
 * @data: event-specific payload (section pointer or ops struct)
 *
 * Returns 0 on success or a negative error code for the failing event.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_POST_SHUTDOWN:
		/*
		 * Cancel pending deferred clock-disable work before taking
		 * clk_mutex, then force the clock off and reset the vote
		 * count regardless of outstanding users.
		 */
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		/* Vote for the clock before image download begins */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:
		/* Drop the clock vote once download finishes or fails */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	case WDSP_EVENT_SUSPEND:
		/* Refuse suspend while clock users/state forbid it */
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	case WDSP_EVENT_GET_DEVOPS:
		/*
		 * Hand our read/write entry points to the caller through
		 * the provided wcd_spi_ops structure.
		 */
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}
		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
  853. static int wcd_spi_bus_gwrite(void *context, const void *reg,
  854. size_t reg_len, const void *val,
  855. size_t val_len)
  856. {
  857. struct device *dev = context;
  858. struct spi_device *spi = to_spi_device(dev);
  859. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  860. u8 *tx_buf = wcd_spi->tx_buf;
  861. if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
  862. val_len != wcd_spi->val_bytes) {
  863. dev_err(&spi->dev,
  864. "%s: Invalid input, reg_len = %zd, val_len = %zd",
  865. __func__, reg_len, val_len);
  866. return -EINVAL;
  867. }
  868. memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
  869. tx_buf[0] = WCD_SPI_CMD_IRW;
  870. tx_buf[1] = *((u8 *)reg);
  871. memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
  872. val, val_len);
  873. return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
  874. }
  875. static int wcd_spi_bus_write(void *context, const void *data,
  876. size_t count)
  877. {
  878. struct device *dev = context;
  879. struct spi_device *spi = to_spi_device(dev);
  880. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  881. if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
  882. dev_err(&spi->dev, "%s: Invalid size %zd\n",
  883. __func__, count);
  884. WARN_ON(1);
  885. return -EINVAL;
  886. }
  887. return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
  888. data + wcd_spi->reg_bytes,
  889. count - wcd_spi->reg_bytes);
  890. }
/*
 * wcd_spi_bus_read: regmap bus .read callback
 * @context: struct device of the SPI device
 * @reg: register address bytes (must be exactly reg_bytes long)
 * @reg_len: length of @reg
 * @val: buffer to receive the value (must be exactly val_bytes long)
 * @val_len: length of @val
 *
 * Issues an IRR (internal register read) command as a two-transfer
 * SPI message: first transfer sends the command, second receives the
 * register value. Uses the pre-allocated msg2/xfer2 pair set up at
 * component bind time.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret = 0;

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	/* Build the IRR command: opcode followed by register address */
	memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	/* First transfer: command out, nothing received */
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	/* Second transfer: value in, nothing sent */
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = val_len;

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret) {
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
		goto done;
	}

	/* Copy out of the DMA-safe bounce buffer only on success */
	memcpy(val, rx_buf, val_len);

done:
	return ret;
}
/*
 * regmap bus backend: routes regmap accesses through the WCD SPI
 * IRW/IRR command protocol implemented above. Register addresses use
 * native endianness; values are big-endian on the wire.
 */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
  938. static int wcd_spi_state_show(struct seq_file *f, void *ptr)
  939. {
  940. struct spi_device *spi = f->private;
  941. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  942. const char *clk_state, *clk_mutex, *xfer_mutex;
  943. if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
  944. clk_state = "enabled";
  945. else
  946. clk_state = "disabled";
  947. clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
  948. "locked" : "unlocked";
  949. xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
  950. "locked" : "unlocked";
  951. seq_printf(f, "clk_state = %s\nclk_users = %d\n"
  952. "clk_mutex = %s\nxfer_mutex = %s\n",
  953. clk_state, wcd_spi->clk_users, clk_mutex,
  954. xfer_mutex);
  955. return 0;
  956. }
/* Open handler for the "state" debugfs file; wires up seq_file. */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
/* File operations for the read-only "state" debugfs entry. */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  967. static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
  968. size_t count, loff_t *ppos)
  969. {
  970. struct spi_device *spi = file->private_data;
  971. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  972. struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
  973. struct wcd_spi_msg msg;
  974. ssize_t buf_size, read_count = 0;
  975. char *buf;
  976. int ret;
  977. if (*ppos < 0 || !count)
  978. return -EINVAL;
  979. if (dbg_data->size == 0 || dbg_data->addr == 0) {
  980. dev_err(&spi->dev,
  981. "%s: Invalid request, size = %u, addr = 0x%x\n",
  982. __func__, dbg_data->size, dbg_data->addr);
  983. return 0;
  984. }
  985. buf_size = count < dbg_data->size ? count : dbg_data->size;
  986. buf = kzalloc(buf_size, GFP_KERNEL);
  987. if (!buf)
  988. return -ENOMEM;
  989. msg.data = buf;
  990. msg.remote_addr = dbg_data->addr;
  991. msg.len = buf_size;
  992. msg.flags = 0;
  993. ret = wcd_spi_data_read(spi, &msg);
  994. if (ret < 0) {
  995. dev_err(&spi->dev,
  996. "%s: Failed to read %zu bytes from addr 0x%x\n",
  997. __func__, buf_size, msg.remote_addr);
  998. goto done;
  999. }
  1000. read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);
  1001. done:
  1002. kfree(buf);
  1003. if (ret < 0)
  1004. return ret;
  1005. else
  1006. return read_count;
  1007. }
/* File operations for the read-only "mem_read" debugfs entry. */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
  1012. static int wcd_spi_debugfs_init(struct spi_device *spi)
  1013. {
  1014. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1015. struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
  1016. int rc = 0;
  1017. dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
  1018. if (IS_ERR_OR_NULL(dbg_data->dir)) {
  1019. dbg_data->dir = NULL;
  1020. rc = -ENODEV;
  1021. goto done;
  1022. }
  1023. debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
  1024. debugfs_create_u32("addr", 0644, dbg_data->dir,
  1025. &dbg_data->addr);
  1026. debugfs_create_u32("size", 0644, dbg_data->dir,
  1027. &dbg_data->size);
  1028. debugfs_create_file("mem_read", 0444, dbg_data->dir,
  1029. spi, &mem_read_fops);
  1030. done:
  1031. return rc;
  1032. }
/* Power-on default values for the WCD SPI slave registers (regcache). */
static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};
  1057. static bool wcd_spi_is_volatile_reg(struct device *dev,
  1058. unsigned int reg)
  1059. {
  1060. switch (reg) {
  1061. case WCD_SPI_SLAVE_SANITY:
  1062. case WCD_SPI_SLAVE_STATUS:
  1063. case WCD_SPI_SLAVE_IRQ_STATUS:
  1064. case WCD_SPI_SLAVE_TX:
  1065. case WCD_SPI_SLAVE_SW_RST_IRQ:
  1066. case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
  1067. case WCD_SPI_SLAVE_FIFO_LEVEL:
  1068. case WCD_SPI_SLAVE_GENERICS:
  1069. return true;
  1070. }
  1071. return false;
  1072. }
  1073. static bool wcd_spi_is_readable_reg(struct device *dev,
  1074. unsigned int reg)
  1075. {
  1076. switch (reg) {
  1077. case WCD_SPI_SLAVE_SW_RESET:
  1078. case WCD_SPI_SLAVE_IRQ_CLR:
  1079. case WCD_SPI_SLAVE_IRQ_FORCE:
  1080. return false;
  1081. }
  1082. return true;
  1083. }
/*
 * regmap configuration: 8-bit register addresses with 32-bit values,
 * cached in an rbtree with the defaults table above.
 */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
  1094. static int wdsp_spi_init(struct device *dev, void *priv_data)
  1095. {
  1096. struct spi_device *spi = to_spi_device(dev);
  1097. int ret;
  1098. ret = wcd_spi_init(spi);
  1099. if (ret < 0)
  1100. dev_err(&spi->dev, "%s: Init failed, err = %d\n",
  1101. __func__, ret);
  1102. return ret;
  1103. }
/*
 * wdsp_spi_deinit: WDSP component-ops .deinit callback
 * @dev: the SPI device
 * @priv_data: component private data (unused)
 *
 * Always returns 0.
 */
static int wdsp_spi_deinit(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/*
	 * Deinit means the hardware is reset. Mark the cache
	 * as dirty here, so init will sync the cache
	 */
	regcache_mark_dirty(wcd_spi->regmap);
	return 0;
}
/* Component operations registered with the WDSP master. */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
  1120. static int wcd_spi_component_bind(struct device *dev,
  1121. struct device *master,
  1122. void *data)
  1123. {
  1124. struct spi_device *spi = to_spi_device(dev);
  1125. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1126. int ret = 0;
  1127. wcd_spi->m_dev = master;
  1128. wcd_spi->m_ops = data;
  1129. if (wcd_spi->m_ops &&
  1130. wcd_spi->m_ops->register_cmpnt_ops)
  1131. ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
  1132. wcd_spi,
  1133. &wdsp_spi_ops);
  1134. if (ret) {
  1135. dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
  1136. __func__, ret);
  1137. goto done;
  1138. }
  1139. wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
  1140. wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
  1141. wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
  1142. &spi->dev, &wcd_spi_regmap_cfg);
  1143. if (IS_ERR(wcd_spi->regmap)) {
  1144. ret = PTR_ERR(wcd_spi->regmap);
  1145. dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
  1146. __func__, ret);
  1147. goto done;
  1148. }
  1149. if (wcd_spi_debugfs_init(spi))
  1150. dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
  1151. spi_message_init(&wcd_spi->msg1);
  1152. spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
  1153. spi_message_init(&wcd_spi->msg2);
  1154. spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
  1155. spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
  1156. /* Pre-allocate the buffers */
  1157. wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
  1158. GFP_KERNEL | GFP_DMA);
  1159. if (!wcd_spi->tx_buf) {
  1160. ret = -ENOMEM;
  1161. goto done;
  1162. }
  1163. wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
  1164. GFP_KERNEL | GFP_DMA);
  1165. if (!wcd_spi->rx_buf) {
  1166. kfree(wcd_spi->tx_buf);
  1167. wcd_spi->tx_buf = NULL;
  1168. ret = -ENOMEM;
  1169. goto done;
  1170. }
  1171. done:
  1172. return ret;
  1173. }
  1174. static void wcd_spi_component_unbind(struct device *dev,
  1175. struct device *master,
  1176. void *data)
  1177. {
  1178. struct spi_device *spi = to_spi_device(dev);
  1179. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1180. struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
  1181. debugfs_remove_recursive(dbg_data->dir);
  1182. dbg_data->dir = NULL;
  1183. wcd_spi->m_dev = NULL;
  1184. wcd_spi->m_ops = NULL;
  1185. spi_transfer_del(&wcd_spi->xfer1);
  1186. spi_transfer_del(&wcd_spi->xfer2[0]);
  1187. spi_transfer_del(&wcd_spi->xfer2[1]);
  1188. kfree(wcd_spi->tx_buf);
  1189. kfree(wcd_spi->rx_buf);
  1190. wcd_spi->tx_buf = NULL;
  1191. wcd_spi->rx_buf = NULL;
  1192. }
/* Component bind/unbind hooks registered with the component framework. */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
/*
 * wcd_spi_probe: SPI driver probe
 * @spi: the probed spi_device
 *
 * Allocates driver state, reads the mandatory "qcom,mem-base-addr"
 * DT property, initializes locks/work/completion and registers the
 * device with the component framework. Returns 0 on success or a
 * negative error code, unwinding partial initialization.
 */
static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	/* Base address of WDSP memory in the remote SPI address space */
	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);

	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}
  1236. static int wcd_spi_remove(struct spi_device *spi)
  1237. {
  1238. struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
  1239. component_del(&spi->dev, &wcd_spi_component_ops);
  1240. mutex_destroy(&wcd_spi->clk_mutex);
  1241. mutex_destroy(&wcd_spi->xfer_mutex);
  1242. devm_kfree(&spi->dev, wcd_spi);
  1243. spi_set_drvdata(spi, NULL);
  1244. return 0;
  1245. }
  1246. #ifdef CONFIG_PM
/*
 * wcd_spi_suspend: dev_pm_ops .suspend callback
 * @dev: the SPI device
 *
 * Returns 0 when suspend may proceed, -EBUSY when the SPI clock is
 * still in use, or the master's suspend error.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * Drop the lock around the master callback to avoid
		 * deadlocking against paths that take clk_mutex.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
/*
 * wcd_spi_resume: dev_pm_ops .resume callback
 * @dev: the SPI device
 *
 * Clears the suspended flag and wakes any waiter blocked in
 * wcd_spi_wait_for_resume(). Always returns 0.
 */
static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}
/* System suspend/resume hooks, compiled in only with CONFIG_PM. */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
#endif
/* Device-tree match table for the WCD SPI v2 slave. */
static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);

/* SPI driver registration; PM ops only when CONFIG_PM is enabled. */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");