
// SPDX-License-Identifier: GPL-2.0-only
//
// Apple SoCs MCA driver
//
// Copyright (C) The Asahi Linux Contributors
//
// The MCA peripheral is made up of a number of identical units called clusters.
// Each cluster has its own clock parent and SYNC signal generator, carries
// four SERDES units and has a dedicated I2S port on the SoC's periphery.
//
// The clusters can operate independently, or can be combined together in a
// configurable manner. We mostly treat them as self-contained independent
// units and don't configure any cross-cluster connections except for the I2S
// ports. The I2S ports can be routed to any of the clusters (irrespective
// of their native cluster). We map this onto ASoC's (DPCM) notion of backend
// and frontend DAIs. The 'cluster guts' are frontends which are dynamically
// routed to backend I2S ports.
//
// DAI references in devicetree are resolved to backends. The routing between
// frontends and backends is determined by the machine driver in the DAPM paths
// it supplies.

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

#define USE_RXB_FOR_CAPTURE

/* Relative to cluster base */
#define REG_STATUS		0x0
#define STATUS_MCLK_EN		BIT(0)
#define REG_MCLK_CONF		0x4
#define MCLK_CONF_DIV		GENMASK(11, 8)

#define REG_SYNCGEN_STATUS	0x100
#define SYNCGEN_STATUS_EN	BIT(0)
#define REG_SYNCGEN_MCLK_SEL	0x104
#define SYNCGEN_MCLK_SEL	GENMASK(3, 0)
#define REG_SYNCGEN_HI_PERIOD	0x108
#define REG_SYNCGEN_LO_PERIOD	0x10c

#define REG_PORT_ENABLES	0x600
#define PORT_ENABLES_CLOCKS	GENMASK(2, 1)
#define PORT_ENABLES_TX_DATA	BIT(3)
#define REG_PORT_CLOCK_SEL	0x604
#define PORT_CLOCK_SEL		GENMASK(11, 8)
#define REG_PORT_DATA_SEL	0x608
#define PORT_DATA_SEL_TXA(cl)	(1 << ((cl)*2))
#define PORT_DATA_SEL_TXB(cl)	(2 << ((cl)*2))
#define REG_INTSTATE		0x700
#define REG_INTMASK		0x704

/* Bases of serdes units (relative to cluster) */
#define CLUSTER_RXA_OFF	0x200
#define CLUSTER_TXA_OFF	0x300
#define CLUSTER_RXB_OFF	0x400
#define CLUSTER_TXB_OFF	0x500

#define CLUSTER_TX_OFF	CLUSTER_TXA_OFF

#ifndef USE_RXB_FOR_CAPTURE
#define CLUSTER_RX_OFF	CLUSTER_RXA_OFF
#else
#define CLUSTER_RX_OFF	CLUSTER_RXB_OFF
#endif

/* Relative to serdes unit base */
#define REG_SERDES_STATUS	0x00
#define SERDES_STATUS_EN	BIT(0)
#define SERDES_STATUS_RST	BIT(1)
#define REG_TX_SERDES_CONF	0x04
#define REG_RX_SERDES_CONF	0x08
#define SERDES_CONF_NCHANS	GENMASK(3, 0)
#define SERDES_CONF_WIDTH_MASK	GENMASK(8, 4)
#define SERDES_CONF_WIDTH_16BIT	0x40
#define SERDES_CONF_WIDTH_20BIT	0x80
#define SERDES_CONF_WIDTH_24BIT	0xc0
#define SERDES_CONF_WIDTH_32BIT	0x100
#define SERDES_CONF_BCLK_POL	0x400
#define SERDES_CONF_LSB_FIRST	0x800
#define SERDES_CONF_UNK1	BIT(12)
#define SERDES_CONF_UNK2	BIT(13)
#define SERDES_CONF_UNK3	BIT(14)
#define SERDES_CONF_NO_DATA_FEEDBACK	BIT(15)
#define SERDES_CONF_SYNC_SEL	GENMASK(18, 16)
#define REG_TX_SERDES_BITSTART	0x08
#define REG_RX_SERDES_BITSTART	0x0c
#define REG_TX_SERDES_SLOTMASK	0x0c
#define REG_RX_SERDES_SLOTMASK	0x10
#define REG_RX_SERDES_PORT	0x04

/* Relative to switch base */
#define REG_DMA_ADAPTER_A(cl)	(0x8000 * (cl))
#define REG_DMA_ADAPTER_B(cl)	(0x8000 * (cl) + 0x4000)
#define DMA_ADAPTER_TX_LSB_PAD	GENMASK(4, 0)
#define DMA_ADAPTER_TX_NCHANS	GENMASK(6, 5)
#define DMA_ADAPTER_RX_MSB_PAD	GENMASK(12, 8)
#define DMA_ADAPTER_RX_NCHANS	GENMASK(14, 13)
#define DMA_ADAPTER_NCHANS	GENMASK(22, 20)

#define SWITCH_STRIDE	0x8000
#define CLUSTER_STRIDE	0x4000

#define MAX_NCLUSTERS	6

#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
			   SNDRV_PCM_FMTBIT_S24_LE | \
			   SNDRV_PCM_FMTBIT_S32_LE)
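
/* Per-cluster driver state; embedded as a flexible array in struct mca_data */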
struct mca_cluster {
	int no;
	__iomem void *base;
	struct mca_data *host;
	struct device *pd_dev;
	struct clk *clk_parent;
	struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];

	bool port_started[SNDRV_PCM_STREAM_LAST + 1];
	int port_driver; /* The cluster driving this cluster's port */

	bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
	struct device_link *pd_link;

	unsigned int bclk_ratio;

	/* Masks etc. picked up via the set_tdm_slot method */
	int tdm_slots;
	int tdm_slot_width;
	unsigned int tdm_tx_mask;
	unsigned int tdm_rx_mask;
};

struct mca_data {
	struct device *dev;

	__iomem void *switch_base;

	struct device *pd_dev;
	struct reset_control *rstc;
	struct device_link *pd_link;

	/* Mutex for accessing port_driver of foreign clusters */
	struct mutex port_mutex;

	int nclusters;
	struct mca_cluster clusters[];
};
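
/* Read-modify-write a cluster register, touching only the bits in 'mask' */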
static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
{
	__iomem void *ptr = cl->base + regoffset;
	u32 newval;

	newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
	writel_relaxed(newval, ptr);
}

/*
 * Get the cluster of FE or BE DAI
 */
static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
{
	struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
	/*
	 * FE DAIs are 0 ... nclusters - 1
	 * BE DAIs are nclusters ... 2*nclusters - 1
	 */
	int cluster_no = dai->id % mca->nclusters;

	return &mca->clusters[cluster_no];
}

/* called before PCM trigger */
static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
				 struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
	int serdes_conf =
		serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 7));
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_RST);
		/*
		 * Experiments suggest that it takes at most ~1 us
		 * for the bit to clear, so wait 2 us for good measure.
		 */
		udelay(2);
		WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
			SERDES_STATUS_RST);
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
		break;
	default:
		break;
	}
}
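
/*
 * DAI trigger op: enables/disables the SERDES unit. The reset handshake
 * for the unit happens beforehand in mca_fe_early_trigger().
 */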
static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_EN);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
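
/*
 * Bring up the cluster's clock parent, power domain, SYNC generator and
 * MCLK output. Mirrored by mca_fe_disable_clocks() below.
 */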
static int mca_fe_enable_clocks(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	int ret;

	ret = clk_prepare_enable(cl->clk_parent);
	if (ret) {
		dev_err(mca->dev,
			"cluster %d: unable to enable clock parent: %d\n",
			cl->no, ret);
		return ret;
	}

	/*
	 * We can't power up the device earlier than this because
	 * the power state driver would error out on seeing the device
	 * as clock-gated.
	 */
	cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
				      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				      DL_FLAG_RPM_ACTIVE);
	if (!cl->pd_link) {
		dev_err(mca->dev,
			"cluster %d: unable to prop-up power domain\n", cl->no);
		clk_disable_unprepare(cl->clk_parent);
		return -EINVAL;
	}

	writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
		   SYNCGEN_STATUS_EN);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);

	return 0;
}

static void mca_fe_disable_clocks(struct mca_cluster *cl)
{
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);

	device_link_del(cl->pd_link);
	clk_disable_unprepare(cl->clk_parent);
}
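
/*
 * Check whether any BE whose port is driven by this FE cluster still
 * has clocks in use in either stream direction.
 */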
static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	struct mca_cluster *be_cl;
	int stream, i;

	mutex_lock(&mca->port_mutex);
	for (i = 0; i < mca->nclusters; i++) {
		be_cl = &mca->clusters[i];

		if (be_cl->port_driver != cl->no)
			continue;

		for_each_pcm_streams(stream) {
			if (be_cl->clocks_in_use[stream]) {
				mutex_unlock(&mca->port_mutex);
				return true;
			}
		}
	}
	mutex_unlock(&mca->port_mutex);
	return false;
}

static int mca_be_prepare(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;
	int ret;

	if (cl->port_driver < 0)
		return -EINVAL;

	fe_cl = &mca->clusters[cl->port_driver];

	/*
	 * Typically the CODECs we are paired with will require clocks
	 * to be present at time of unmute with the 'mute_stream' op
	 * or at time of DAPM widget power-up. We need to enable clocks
	 * here at the latest (frontend prepare would be too late).
	 */
	if (!mca_fe_clocks_in_use(fe_cl)) {
		ret = mca_fe_enable_clocks(fe_cl);
		if (ret < 0)
			return ret;
	}

	cl->clocks_in_use[substream->stream] = true;

	return 0;
}

static int mca_be_hw_free(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;

	if (cl->port_driver < 0)
		return -EINVAL;

	/*
	 * We are operating on a foreign cluster here, but since we
	 * belong to the same PCM, accesses should have been
	 * synchronized at ASoC level.
	 */
	fe_cl = &mca->clusters[cl->port_driver];
	if (!mca_fe_clocks_in_use(fe_cl))
		return 0; /* Nothing to do */

	cl->clocks_in_use[substream->stream] = false;

	if (!mca_fe_clocks_in_use(fe_cl))
		mca_fe_disable_clocks(fe_cl);

	return 0;
}
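
/* Drop the highest set bits of 'mask' until no more than 'nchans' remain */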
static unsigned int mca_crop_mask(unsigned int mask, int nchans)
{
	while (hweight32(mask) > nchans)
		mask &= ~(1 << __fls(mask));

	return mask;
}
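
/*
 * Program a SERDES unit for the given TDM layout: slot count and width,
 * sync source, slot masks and, for RX units, the source port.
 */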
static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
				unsigned int mask, int slots, int nchans,
				int slot_width, bool is_tx, int port)
{
	__iomem void *serdes_base = cl->base + serdes_unit;
	u32 serdes_conf, serdes_conf_mask;

	serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
	serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
	switch (slot_width) {
	case 16:
		serdes_conf |= SERDES_CONF_WIDTH_16BIT;
		break;
	case 20:
		serdes_conf |= SERDES_CONF_WIDTH_20BIT;
		break;
	case 24:
		serdes_conf |= SERDES_CONF_WIDTH_24BIT;
		break;
	case 32:
		serdes_conf |= SERDES_CONF_WIDTH_32BIT;
		break;
	default:
		goto err;
	}

	serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
	serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);

	if (is_tx) {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_UNK3;
	} else {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3 |
				    SERDES_CONF_NO_DATA_FEEDBACK;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_NO_DATA_FEEDBACK;
	}

	mca_modify(cl,
		   serdes_unit +
			   (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
		   serdes_conf_mask, serdes_conf);

	if (is_tx) {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
		writel_relaxed(~((u32)mask),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
	} else {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_RX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(1 << port,
			       serdes_base + REG_RX_SERDES_PORT);
	}

	return 0;

err:
	dev_err(cl->host->dev,
		"unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
		mask, slots, slot_width);
	return -EINVAL;
}
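
/* Stash TDM settings from the machine driver; applied in mca_fe_hw_params() */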
static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
			       unsigned int rx_mask, int slots, int slot_width)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->tdm_slots = slots;
	cl->tdm_slot_width = slot_width;
	cl->tdm_tx_mask = tx_mask;
	cl->tdm_rx_mask = rx_mask;

	return 0;
}

static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	bool fpol_inv = false;
	u32 serdes_conf = 0;
	u32 bitstart;

	if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
	    SND_SOC_DAIFMT_BP_FP)
		goto err;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		fpol_inv = 0;
		bitstart = 1;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		fpol_inv = 1;
		bitstart = 0;
		break;
	default:
		goto err;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_IF:
	case SND_SOC_DAIFMT_IB_IF:
		fpol_inv ^= 1;
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
	case SND_SOC_DAIFMT_NB_IF:
		serdes_conf |= SERDES_CONF_BCLK_POL;
		break;
	}

	if (!fpol_inv)
		goto err;

	mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);

	return 0;

err:
	dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
	return -EINVAL;
}

static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->bclk_ratio = ratio;

	return 0;
}
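
/* Find the I2S port (BE cluster number) this FE is routed to via DPCM */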
static int mca_fe_get_port(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dpcm *dpcm;

	be = NULL;
	for_each_dpcm_be(fe, substream->stream, dpcm) {
		be = dpcm->be;
		break;
	}

	if (!be)
		return -EINVAL;

	return mca_dai_to_cluster(asoc_rtd_to_cpu(be, 0))->no;
}

static int mca_fe_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct device *dev = mca->dev;
	unsigned int samp_rate = params_rate(params);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	bool refine_tdm = false;
	unsigned long bclk_ratio;
	unsigned int tdm_slots, tdm_slot_width, tdm_mask;
	u32 regval, pad;
	int ret, port, nchans_ceiled;

	if (!cl->tdm_slot_width) {
		/*
		 * We were not given TDM settings from above, set initial
		 * guesses which will later be refined.
		 */
		tdm_slot_width = params_width(params);
		tdm_slots = params_channels(params);
		refine_tdm = true;
	} else {
		tdm_slot_width = cl->tdm_slot_width;
		tdm_slots = cl->tdm_slots;
		tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
	}

	if (cl->bclk_ratio)
		bclk_ratio = cl->bclk_ratio;
	else
		bclk_ratio = tdm_slot_width * tdm_slots;

	if (refine_tdm) {
		int nchannels = params_channels(params);

		if (nchannels > 2) {
			dev_err(dev, "missing TDM for stream with more than two channels\n");
			return -EINVAL;
		}
		if ((bclk_ratio % nchannels) != 0) {
			dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
				bclk_ratio, nchannels);
			return -EINVAL;
		}

		tdm_slot_width = bclk_ratio / nchannels;
		if (tdm_slot_width > 32 && nchannels == 1)
			tdm_slot_width = 32;

		if (tdm_slot_width < params_width(params)) {
			dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n",
				tdm_slot_width, params_width(params));
			return -EINVAL;
		}

		tdm_mask = (1 << tdm_slots) - 1;
	}

	port = mca_fe_get_port(substream);
	if (port < 0)
		return port;

	ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
				   tdm_mask, tdm_slots, params_channels(params),
				   tdm_slot_width, is_tx, port);
	if (ret)
		return ret;

	pad = 32 - params_width(params);

	/*
	 * TODO: Here the register semantics aren't clear.
	 */
	nchans_ceiled = min_t(int, params_channels(params), 4);
	regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
		 FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
		 FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);

#ifndef USE_RXB_FOR_CAPTURE
	writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
#else
	if (is_tx)
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
	else
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
#endif

	if (!mca_fe_clocks_in_use(cl)) {
		/*
		 * Set up FSYNC duty cycle as even as possible.
		 */
		writel_relaxed((bclk_ratio / 2) - 1,
			       cl->base + REG_SYNCGEN_HI_PERIOD);
		writel_relaxed(((bclk_ratio + 1) / 2) - 1,
			       cl->base + REG_SYNCGEN_LO_PERIOD);
		writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
			       cl->base + REG_MCLK_CONF);

		ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
		if (ret) {
			dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
				cl->no, ret);
			return ret;
		}
	}

	return 0;
}

static const struct snd_soc_dai_ops mca_fe_ops = {
	.set_fmt = mca_fe_set_fmt,
	.set_bclk_ratio = mca_set_bclk_ratio,
	.set_tdm_slot = mca_fe_set_tdm_slot,
	.hw_params = mca_fe_hw_params,
	.trigger = mca_fe_trigger,
};
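
/* True if the BE port has been started in at least one direction */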
static bool mca_be_started(struct mca_cluster *cl)
{
	int stream;

	for_each_pcm_streams(stream)
		if (cl->port_started[stream])
			return true;
	return false;
}

static int mca_be_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *be = asoc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *fe;
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_cluster *fe_cl;
	struct mca_data *mca = cl->host;
	struct snd_soc_dpcm *dpcm;

	fe = NULL;

	for_each_dpcm_fe(be, substream->stream, dpcm) {
		if (fe && dpcm->fe != fe) {
			dev_err(mca->dev, "many FE per one BE unsupported\n");
			return -EINVAL;
		}

		fe = dpcm->fe;
	}

	if (!fe)
		return -EINVAL;

	fe_cl = mca_dai_to_cluster(asoc_rtd_to_cpu(fe, 0));

	if (mca_be_started(cl)) {
		/*
		 * Port is already started in the other direction.
		 * Make sure there isn't a conflict with another cluster
		 * driving the port.
		 */
		if (cl->port_driver != fe_cl->no)
			return -EINVAL;

		cl->port_started[substream->stream] = true;
		return 0;
	}

	writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
		       cl->base + REG_PORT_ENABLES);
	writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
		       cl->base + REG_PORT_CLOCK_SEL);
	writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
		       cl->base + REG_PORT_DATA_SEL);

	mutex_lock(&mca->port_mutex);
	cl->port_driver = fe_cl->no;
	mutex_unlock(&mca->port_mutex);
	cl->port_started[substream->stream] = true;

	return 0;
}

static void mca_be_shutdown(struct snd_pcm_substream *substream,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;

	cl->port_started[substream->stream] = false;

	if (!mca_be_started(cl)) {
		/*
		 * Were we the last direction to shutdown?
		 * Turn off the lights.
		 */
		writel_relaxed(0, cl->base + REG_PORT_ENABLES);
		writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);

		mutex_lock(&mca->port_mutex);
		cl->port_driver = -1;
		mutex_unlock(&mca->port_mutex);
	}
}

static const struct snd_soc_dai_ops mca_be_ops = {
	.prepare = mca_be_prepare,
	.hw_free = mca_be_hw_free,
	.startup = mca_be_startup,
	.shutdown = mca_be_shutdown,
};
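
/*
 * Set PCM hardware limits for the substream, refined against the
 * capabilities of the DMA channel that will service it.
 */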
static int mca_set_runtime_hwparams(struct snd_soc_component *component,
				    struct snd_pcm_substream *substream,
				    struct dma_chan *chan)
{
	struct device *dma_dev = chan->device->dev;
	struct snd_dmaengine_dai_dma_data dma_data = {};
	int ret;
	struct snd_pcm_hardware hw;

	memset(&hw, 0, sizeof(hw));

	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		  SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = 16;

	ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
							&hw, chan);
	if (ret)
		return ret;

	return snd_soc_set_runtime_hwparams(substream, &hw);
}

static int mca_pcm_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	struct dma_chan *chan = cl->dma_chans[substream->stream];
	int ret;

	if (rtd->dai_link->no_pcm)
		return 0;

	ret = mca_set_runtime_hwparams(component, substream, chan);
	if (ret)
		return ret;

	return snd_dmaengine_pcm_open(substream, chan);
}

static int mca_hw_params(struct snd_soc_component *component,
			 struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	struct dma_slave_config slave_config;
	int ret;

	if (rtd->dai_link->no_pcm)
		return 0;

	memset(&slave_config, 0, sizeof(slave_config));
	ret = snd_hwparams_to_dma_slave_config(substream, params,
					       &slave_config);
	if (ret < 0)
		return ret;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		slave_config.dst_port_window_size =
			min_t(u32, params_channels(params), 4);
	else
		slave_config.src_port_window_size =
			min_t(u32, params_channels(params), 4);

	return dmaengine_slave_config(chan, &slave_config);
}

static int mca_close(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	return snd_dmaengine_pcm_close(substream);
}

static int mca_trigger(struct snd_soc_component *component,
		       struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	/*
	 * Before we do the PCM trigger proper, insert an opportunity
	 * to reset the frontend's SERDES.
	 */
	mca_fe_early_trigger(substream, cmd, asoc_rtd_to_cpu(rtd, 0));

	return snd_dmaengine_pcm_trigger(substream, cmd);
}

static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
				     struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return -ENOTSUPP;

	return snd_dmaengine_pcm_pointer(substream);
}
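
/*
 * Request the cluster's DMA channel by name ("tx0a", "rx0a" or "rx0b",
 * depending on whether RXB is used for capture).
 */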
static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl,
						unsigned int stream)
{
	bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
#ifndef USE_RXB_FOR_CAPTURE
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%da", cl->no);
#else
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%db", cl->no);
#endif
	return of_dma_request_slave_channel(cl->host->dev->of_node, name);
}

static void mca_pcm_free(struct snd_soc_component *component,
			 struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;

		if (!substream || !cl->dma_chans[i])
			continue;

		dma_release_channel(cl->dma_chans[i]);
		cl->dma_chans[i] = NULL;
	}
}

static int mca_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd)
{
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return 0;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;
		struct dma_chan *chan;

		if (!substream)
			continue;

		chan = mca_request_dma_channel(cl, i);

		if (IS_ERR_OR_NULL(chan)) {
			mca_pcm_free(component, rtd->pcm);

			if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
				return PTR_ERR(chan);

			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
				i, cl->no, chan);

			if (!chan)
				return -EINVAL;
			return PTR_ERR(chan);
		}

		cl->dma_chans[i] = chan;
		snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
					   chan->device->dev, 512 * 1024 * 6,
					   SIZE_MAX);
	}

	return 0;
}

static const struct snd_soc_component_driver mca_component = {
	.name = "apple-mca",
	.open = mca_pcm_open,
	.close = mca_close,
	.hw_params = mca_hw_params,
	.trigger = mca_trigger,
	.pointer = mca_pointer,
	.pcm_construct = mca_pcm_new,
	.pcm_destruct = mca_pcm_free,
};
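
/* Undo probe-time acquisitions: clocks, PM domains, device links, reset */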
static void apple_mca_release(struct mca_data *mca)
{
	int i;

	for (i = 0; i < mca->nclusters; i++) {
		struct mca_cluster *cl = &mca->clusters[i];

		if (!IS_ERR_OR_NULL(cl->clk_parent))
			clk_put(cl->clk_parent);

		if (!IS_ERR_OR_NULL(cl->pd_dev))
			dev_pm_domain_detach(cl->pd_dev, true);
	}

	if (mca->pd_link)
		device_link_del(mca->pd_link);

	if (!IS_ERR_OR_NULL(mca->pd_dev))
		dev_pm_domain_detach(mca->pd_dev, true);

	reset_control_rearm(mca->rstc);
}

static int apple_mca_probe(struct platform_device *pdev)
{
	struct mca_data *mca;
	struct mca_cluster *clusters;
	struct snd_soc_dai_driver *dai_drivers;
	struct resource *res;
	void __iomem *base;
	int nclusters;
	int ret, i;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (resource_size(res) < CLUSTER_STRIDE)
		return -EINVAL;
	nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;

	mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
			   GFP_KERNEL);
	if (!mca)
		return -ENOMEM;
	mca->dev = &pdev->dev;
	mca->nclusters = nclusters;
	mutex_init(&mca->port_mutex);
	platform_set_drvdata(pdev, mca);
	clusters = mca->clusters;

	mca->switch_base =
		devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mca->switch_base))
		return PTR_ERR(mca->switch_base);

	mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(mca->rstc))
		return PTR_ERR(mca->rstc);

	dai_drivers = devm_kzalloc(
		&pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
	if (!dai_drivers)
		return -ENOMEM;

	mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
	if (IS_ERR(mca->pd_dev))
		return -EINVAL;

	mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				       DL_FLAG_RPM_ACTIVE);
	if (!mca->pd_link) {
		ret = -EINVAL;
		/* Prevent an unbalanced reset rearm */
		mca->rstc = NULL;
		goto err_release;
	}

	reset_control_reset(mca->rstc);

	for (i = 0; i < nclusters; i++) {
		struct mca_cluster *cl = &clusters[i];
		struct snd_soc_dai_driver *fe =
			&dai_drivers[mca->nclusters + i];
		struct snd_soc_dai_driver *be = &dai_drivers[i];

		cl->host = mca;
		cl->no = i;
		cl->base = base + CLUSTER_STRIDE * i;
		cl->port_driver = -1;
		cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
		if (IS_ERR(cl->clk_parent)) {
			dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
				i, PTR_ERR(cl->clk_parent));
			ret = PTR_ERR(cl->clk_parent);
			goto err_release;
		}
		cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
		if (IS_ERR(cl->pd_dev)) {
			dev_err(&pdev->dev,
				"unable to obtain cluster %d PD: %ld\n", i,
				PTR_ERR(cl->pd_dev));
			ret = PTR_ERR(cl->pd_dev);
			goto err_release;
		}

		fe->id = i;
		fe->name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
		if (!fe->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		fe->ops = &mca_fe_ops;
		fe->playback.channels_min = 1;
		fe->playback.channels_max = 32;
		fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
		fe->playback.formats = APPLE_MCA_FMTBITS;
		fe->capture.channels_min = 1;
		fe->capture.channels_max = 32;
		fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
		fe->capture.formats = APPLE_MCA_FMTBITS;
		fe->symmetric_rate = 1;

		fe->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
		fe->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);

		if (!fe->playback.stream_name || !fe->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}

		be->id = i + nclusters;
		be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
					  "mca-i2s-%d", i);
		if (!be->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		be->ops = &mca_be_ops;
		be->playback.channels_min = 1;
		be->playback.channels_max = 32;
		be->playback.rates = SNDRV_PCM_RATE_8000_192000;
		be->playback.formats = APPLE_MCA_FMTBITS;
		be->capture.channels_min = 1;
		be->capture.channels_max = 32;
		be->capture.rates = SNDRV_PCM_RATE_8000_192000;
		be->capture.formats = APPLE_MCA_FMTBITS;

		be->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
		be->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
		if (!be->playback.stream_name || !be->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}
	}

	ret = snd_soc_register_component(&pdev->dev, &mca_component,
					 dai_drivers, nclusters * 2);
	if (ret) {
		dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
			ret);
		goto err_release;
	}

	return 0;

err_release:
	apple_mca_release(mca);
	return ret;
}

static int apple_mca_remove(struct platform_device *pdev)
{
	struct mca_data *mca = platform_get_drvdata(pdev);

	snd_soc_unregister_component(&pdev->dev);
	apple_mca_release(mca);
	return 0;
}

static const struct of_device_id apple_mca_of_match[] = {
	{ .compatible = "apple,mca", },
	{}
};
MODULE_DEVICE_TABLE(of, apple_mca_of_match);

static struct platform_driver apple_mca_driver = {
	.driver = {
		.name = "apple-mca",
		.of_match_table = apple_mca_of_match,
	},
	.probe = apple_mca_probe,
	.remove = apple_mca_remove,
};
module_platform_driver(apple_mca_driver);

MODULE_AUTHOR("Martin Povišer <[email protected]>");
MODULE_DESCRIPTION("ASoC Apple MCA driver");
MODULE_LICENSE("GPL");