  1. // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2. // Copyright(c) 2015-17 Intel Corporation.
  3. #include <linux/acpi.h>
  4. #include <linux/delay.h>
  5. #include <linux/mod_devicetable.h>
  6. #include <linux/pm_runtime.h>
  7. #include <linux/soundwire/sdw_registers.h>
  8. #include <linux/soundwire/sdw.h>
  9. #include <linux/soundwire/sdw_type.h>
  10. #include "bus.h"
  11. #include "sysfs_local.h"
  12. static DEFINE_IDA(sdw_bus_ida);
  13. static DEFINE_IDA(sdw_peripheral_ida);
  14. static int sdw_get_id(struct sdw_bus *bus)
  15. {
  16. int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
  17. if (rc < 0)
  18. return rc;
  19. bus->id = rc;
  20. return 0;
  21. }
  22. /**
  23. * sdw_bus_master_add() - add a bus Master instance
  24. * @bus: bus instance
  25. * @parent: parent device
  26. * @fwnode: firmware node handle
  27. *
  28. * Initializes the bus instance, reads properties and creates child
  29. * devices.
  30. */
  31. int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
  32. struct fwnode_handle *fwnode)
  33. {
  34. struct sdw_master_prop *prop = NULL;
  35. int ret;
  36. if (!parent) {
  37. pr_err("SoundWire parent device is not set\n");
  38. return -ENODEV;
  39. }
  40. ret = sdw_get_id(bus);
  41. if (ret < 0) {
  42. dev_err(parent, "Failed to get bus id\n");
  43. return ret;
  44. }
  45. ret = sdw_master_device_add(bus, parent, fwnode);
  46. if (ret < 0) {
  47. dev_err(parent, "Failed to add master device at link %d\n",
  48. bus->link_id);
  49. return ret;
  50. }
  51. if (!bus->ops) {
  52. dev_err(bus->dev, "SoundWire Bus ops are not set\n");
  53. return -EINVAL;
  54. }
  55. if (!bus->compute_params) {
  56. dev_err(bus->dev,
  57. "Bandwidth allocation not configured, compute_params no set\n");
  58. return -EINVAL;
  59. }
  60. mutex_init(&bus->msg_lock);
  61. mutex_init(&bus->bus_lock);
  62. INIT_LIST_HEAD(&bus->slaves);
  63. INIT_LIST_HEAD(&bus->m_rt_list);
  64. /*
  65. * Initialize multi_link flag
  66. */
  67. bus->multi_link = false;
  68. if (bus->ops->read_prop) {
  69. ret = bus->ops->read_prop(bus);
  70. if (ret < 0) {
  71. dev_err(bus->dev,
  72. "Bus read properties failed:%d\n", ret);
  73. return ret;
  74. }
  75. }
  76. sdw_bus_debugfs_init(bus);
  77. /*
  78. * Device numbers in SoundWire are 0 through 15. Enumeration device
  79. * number (0), Broadcast device number (15), Group numbers (12 and
  80. * 13) and Master device number (14) are not used for assignment so
  81. * mask these and other higher bits.
  82. */
  83. /* Set higher order bits */
  84. *bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
  85. /* Set enumeration device number and broadcast device number */
  86. set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
  87. set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
  88. /* Set group device numbers and master device number */
  89. set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
  90. set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
  91. set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
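/*
 * Net effect of the masking above: device numbers 0, 12, 13, 14 and 15
 * are reserved, leaving only 1..SDW_MAX_DEVICES (11) available for
 * dynamic assignment during enumeration.
 */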
  92. /*
  93. * SDW is an enumerable bus, but devices can be powered off. So,
  94. * they won't be able to report as present.
  95. *
  96. * Create Slave devices based on Slaves described in
  97. * the respective firmware (ACPI/DT)
  98. */
  99. if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
  100. ret = sdw_acpi_find_slaves(bus);
  101. else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
  102. ret = sdw_of_find_slaves(bus);
  103. else
  104. ret = -ENOTSUPP; /* No ACPI/DT so error out */
  105. if (ret < 0) {
  106. dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
  107. return ret;
  108. }
  109. /*
  110. * Initialize clock values based on Master properties. The max
  111. * frequency is read from max_clk_freq property. Current assumption
  112. * is that the bus will start at highest clock frequency when
  113. * powered on.
  114. *
  115. * Default active bank will be 0 as out of reset the Slaves have
  116. * to start with bank 0 (Table 40 of Spec)
  117. */
  118. prop = &bus->prop;
  119. bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
  120. bus->params.curr_dr_freq = bus->params.max_dr_freq;
  121. bus->params.curr_bank = SDW_BANK0;
  122. bus->params.next_bank = SDW_BANK1;
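/*
 * Illustrative example (values are hypothetical): with a max_clk_freq of
 * 12.288 MHz, max_dr_freq becomes 24.576 MHz since the double-rate factor
 * is 2, and the bus starts on bank 0 with bank 1 queued as the next bank
 * for dynamic reconfiguration.
 */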
  123. return 0;
  124. }
  125. EXPORT_SYMBOL(sdw_bus_master_add);
  126. static int sdw_delete_slave(struct device *dev, void *data)
  127. {
  128. struct sdw_slave *slave = dev_to_sdw_dev(dev);
  129. struct sdw_bus *bus = slave->bus;
  130. pm_runtime_disable(dev);
  131. sdw_slave_debugfs_exit(slave);
  132. mutex_lock(&bus->bus_lock);
  133. if (slave->dev_num) { /* clear dev_num if assigned */
  134. clear_bit(slave->dev_num, bus->assigned);
  135. if (bus->dev_num_ida_min)
  136. ida_free(&sdw_peripheral_ida, slave->dev_num);
  137. }
  138. list_del_init(&slave->node);
  139. mutex_unlock(&bus->bus_lock);
  140. device_unregister(dev);
  141. return 0;
  142. }
  143. /**
  144. * sdw_bus_master_delete() - delete the bus master instance
  145. * @bus: bus to be deleted
  146. *
  147. * Remove the instance, delete the child devices.
  148. */
  149. void sdw_bus_master_delete(struct sdw_bus *bus)
  150. {
  151. device_for_each_child(bus->dev, NULL, sdw_delete_slave);
  152. sdw_master_device_del(bus);
  153. sdw_bus_debugfs_exit(bus);
  154. ida_free(&sdw_bus_ida, bus->id);
  155. }
  156. EXPORT_SYMBOL(sdw_bus_master_delete);
  157. /*
  158. * SDW IO Calls
  159. */
  160. static inline int find_response_code(enum sdw_command_response resp)
  161. {
  162. switch (resp) {
  163. case SDW_CMD_OK:
  164. return 0;
  165. case SDW_CMD_IGNORED:
  166. return -ENODATA;
  167. case SDW_CMD_TIMEOUT:
  168. return -ETIMEDOUT;
  169. default:
  170. return -EIO;
  171. }
  172. }
  173. static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
  174. {
  175. int retry = bus->prop.err_threshold;
  176. enum sdw_command_response resp;
  177. int ret = 0, i;
  178. for (i = 0; i <= retry; i++) {
  179. resp = bus->ops->xfer_msg(bus, msg);
  180. ret = find_response_code(resp);
  181. /* if cmd is ok or ignored return */
  182. if (ret == 0 || ret == -ENODATA)
  183. return ret;
  184. }
  185. return ret;
  186. }
  187. static inline int do_transfer_defer(struct sdw_bus *bus,
  188. struct sdw_msg *msg,
  189. struct sdw_defer *defer)
  190. {
  191. int retry = bus->prop.err_threshold;
  192. enum sdw_command_response resp;
  193. int ret = 0, i;
  194. defer->msg = msg;
  195. defer->length = msg->len;
  196. init_completion(&defer->complete);
  197. for (i = 0; i <= retry; i++) {
  198. resp = bus->ops->xfer_msg_defer(bus, msg, defer);
  199. ret = find_response_code(resp);
  200. /* if cmd is ok or ignored return */
  201. if (ret == 0 || ret == -ENODATA)
  202. return ret;
  203. }
  204. return ret;
  205. }
  206. static int sdw_reset_page(struct sdw_bus *bus, u16 dev_num)
  207. {
  208. int retry = bus->prop.err_threshold;
  209. enum sdw_command_response resp;
  210. int ret = 0, i;
  211. for (i = 0; i <= retry; i++) {
  212. resp = bus->ops->reset_page_addr(bus, dev_num);
  213. ret = find_response_code(resp);
  214. /* if cmd is ok or ignored return */
  215. if (ret == 0 || ret == -ENODATA)
  216. return ret;
  217. }
  218. return ret;
  219. }
  220. static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
  221. {
  222. int ret;
  223. ret = do_transfer(bus, msg);
  224. if (ret != 0 && ret != -ENODATA)
  225. dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
  226. msg->dev_num, ret,
  227. (msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
  228. msg->addr, msg->len);
  229. if (msg->page)
  230. sdw_reset_page(bus, msg->dev_num);
  231. return ret;
  232. }
  233. /**
  234. * sdw_transfer() - Synchronous transfer message to a SDW Slave device
  235. * @bus: SDW bus
  236. * @msg: SDW message to be xfered
  237. */
  238. int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
  239. {
  240. int ret;
  241. mutex_lock(&bus->msg_lock);
  242. ret = sdw_transfer_unlocked(bus, msg);
  243. mutex_unlock(&bus->msg_lock);
  244. return ret;
  245. }
  246. /**
  247. * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
  248. * @bus: SDW bus
  249. * @sync_delay: Delay before reading status
  250. */
  251. void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
  252. {
  253. u32 status;
  254. if (!bus->ops->read_ping_status)
  255. return;
  256. /*
  257. * wait for peripheral to sync if desired. 10-15ms should be more than
  258. * enough in most cases.
  259. */
  260. if (sync_delay)
  261. usleep_range(10000, 15000);
  262. mutex_lock(&bus->msg_lock);
  263. status = bus->ops->read_ping_status(bus);
  264. mutex_unlock(&bus->msg_lock);
  265. if (!status)
  266. dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
  267. else
  268. dev_dbg(bus->dev, "PING status: %#x\n", status);
  269. }
  270. EXPORT_SYMBOL(sdw_show_ping_status);
  271. /**
  272. * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
  273. * @bus: SDW bus
  274. * @msg: SDW message to be xfered
  275. * @defer: Defer block for signal completion
  276. *
  277. * Caller needs to hold the msg_lock while calling this function
  278. */
  279. int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
  280. struct sdw_defer *defer)
  281. {
  282. int ret;
  283. if (!bus->ops->xfer_msg_defer)
  284. return -ENOTSUPP;
  285. ret = do_transfer_defer(bus, msg, defer);
  286. if (ret != 0 && ret != -ENODATA)
  287. dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
  288. msg->dev_num, ret);
  289. if (msg->page)
  290. sdw_reset_page(bus, msg->dev_num);
  291. return ret;
  292. }
  293. int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
  294. u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
  295. {
  296. memset(msg, 0, sizeof(*msg));
  297. msg->addr = addr; /* addr is 16 bit and truncated here */
  298. msg->len = count;
  299. msg->dev_num = dev_num;
  300. msg->flags = flags;
  301. msg->buf = buf;
  302. if (addr < SDW_REG_NO_PAGE) /* no paging area */
  303. return 0;
  304. if (addr >= SDW_REG_MAX) { /* illegal addr */
  305. pr_err("SDW: Invalid address %x passed\n", addr);
  306. return -EINVAL;
  307. }
  308. if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
  309. if (slave && !slave->prop.paging_support)
  310. return 0;
  311. /* no need for else as that will fall-through to paging */
  312. }
  313. /* paging mandatory */
  314. if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
  315. pr_err("SDW: Invalid device for paging :%d\n", dev_num);
  316. return -EINVAL;
  317. }
  318. if (!slave) {
  319. pr_err("SDW: No slave for paging addr\n");
  320. return -EINVAL;
  321. }
  322. if (!slave->prop.paging_support) {
  323. dev_err(&slave->dev,
  324. "address %x needs paging but no support\n", addr);
  325. return -EINVAL;
  326. }
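/*
 * Paged access: the upper bits of the 32-bit register address are
 * programmed into the SCP AddrPage1/AddrPage2 registers, while the
 * command itself carries only the truncated 16-bit offset with bit 15
 * set so the access lands in the paged half of the command address space.
 */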
  327. msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
  328. msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
  329. msg->addr |= BIT(15);
  330. msg->page = true;
  331. return 0;
  332. }
  333. /*
  334. * Read/Write IO functions.
  335. * no_pm versions can only be called by the bus, e.g. while enumerating or
  336. * handling suspend-resume sequences.
  337. * all clients need to use the pm versions
  338. */
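/*
 * Minimal usage sketch (illustrative only; 'reg' stands for any readable
 * register address):
 *
 *	ret = sdw_read(slave, reg);        client drivers use the pm variants
 *	ret = sdw_read_no_pm(slave, reg);  bus core only, e.g. during
 *	                                   enumeration or suspend/resume
 */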
  339. static int
  340. sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
  341. {
  342. struct sdw_msg msg;
  343. int ret;
  344. ret = sdw_fill_msg(&msg, slave, addr, count,
  345. slave->dev_num, SDW_MSG_FLAG_READ, val);
  346. if (ret < 0)
  347. return ret;
  348. ret = sdw_transfer(slave->bus, &msg);
  349. if (slave->is_mockup_device)
  350. ret = 0;
  351. return ret;
  352. }
  353. static int
  354. sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
  355. {
  356. struct sdw_msg msg;
  357. int ret;
  358. ret = sdw_fill_msg(&msg, slave, addr, count,
  359. slave->dev_num, SDW_MSG_FLAG_WRITE, (u8 *)val);
  360. if (ret < 0)
  361. return ret;
  362. ret = sdw_transfer(slave->bus, &msg);
  363. if (slave->is_mockup_device)
  364. ret = 0;
  365. return ret;
  366. }
  367. int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
  368. {
  369. return sdw_nwrite_no_pm(slave, addr, 1, &value);
  370. }
  371. EXPORT_SYMBOL(sdw_write_no_pm);
  372. static int
  373. sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
  374. {
  375. struct sdw_msg msg;
  376. u8 buf;
  377. int ret;
  378. ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
  379. SDW_MSG_FLAG_READ, &buf);
  380. if (ret < 0)
  381. return ret;
  382. ret = sdw_transfer(bus, &msg);
  383. if (ret < 0)
  384. return ret;
  385. return buf;
  386. }
  387. static int
  388. sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
  389. {
  390. struct sdw_msg msg;
  391. int ret;
  392. ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
  393. SDW_MSG_FLAG_WRITE, &value);
  394. if (ret < 0)
  395. return ret;
  396. return sdw_transfer(bus, &msg);
  397. }
  398. int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
  399. {
  400. struct sdw_msg msg;
  401. u8 buf;
  402. int ret;
  403. ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
  404. SDW_MSG_FLAG_READ, &buf);
  405. if (ret < 0)
  406. return ret;
  407. ret = sdw_transfer_unlocked(bus, &msg);
  408. if (ret < 0)
  409. return ret;
  410. return buf;
  411. }
  412. EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
  413. int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
  414. {
  415. struct sdw_msg msg;
  416. int ret;
  417. ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
  418. SDW_MSG_FLAG_WRITE, &value);
  419. if (ret < 0)
  420. return ret;
  421. return sdw_transfer_unlocked(bus, &msg);
  422. }
  423. EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
  424. int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
  425. {
  426. u8 buf;
  427. int ret;
  428. ret = sdw_nread_no_pm(slave, addr, 1, &buf);
  429. if (ret < 0)
  430. return ret;
  431. else
  432. return buf;
  433. }
  434. EXPORT_SYMBOL(sdw_read_no_pm);
  435. int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
  436. {
  437. int tmp;
  438. tmp = sdw_read_no_pm(slave, addr);
  439. if (tmp < 0)
  440. return tmp;
  441. tmp = (tmp & ~mask) | val;
  442. return sdw_write_no_pm(slave, addr, tmp);
  443. }
  444. EXPORT_SYMBOL(sdw_update_no_pm);
  445. /* Read-Modify-Write Slave register */
  446. int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
  447. {
  448. int tmp;
  449. tmp = sdw_read(slave, addr);
  450. if (tmp < 0)
  451. return tmp;
  452. tmp = (tmp & ~mask) | val;
  453. return sdw_write(slave, addr, tmp);
  454. }
  455. EXPORT_SYMBOL(sdw_update);
  456. /**
  457. * sdw_nread() - Read "n" contiguous SDW Slave registers
  458. * @slave: SDW Slave
  459. * @addr: Register address
  460. * @count: length
  461. * @val: Buffer for values to be read
  462. */
  463. int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
  464. {
  465. int ret;
  466. ret = pm_runtime_get_sync(&slave->dev);
  467. if (ret < 0 && ret != -EACCES) {
  468. pm_runtime_put_noidle(&slave->dev);
  469. return ret;
  470. }
  471. ret = sdw_nread_no_pm(slave, addr, count, val);
  472. pm_runtime_mark_last_busy(&slave->dev);
  473. pm_runtime_put(&slave->dev);
  474. return ret;
  475. }
  476. EXPORT_SYMBOL(sdw_nread);
  477. /**
  478. * sdw_nwrite() - Write "n" contiguous SDW Slave registers
  479. * @slave: SDW Slave
  480. * @addr: Register address
  481. * @count: length
  482. * @val: Buffer for values to be written
  483. */
  484. int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
  485. {
  486. int ret;
  487. ret = pm_runtime_get_sync(&slave->dev);
  488. if (ret < 0 && ret != -EACCES) {
  489. pm_runtime_put_noidle(&slave->dev);
  490. return ret;
  491. }
  492. ret = sdw_nwrite_no_pm(slave, addr, count, val);
  493. pm_runtime_mark_last_busy(&slave->dev);
  494. pm_runtime_put(&slave->dev);
  495. return ret;
  496. }
  497. EXPORT_SYMBOL(sdw_nwrite);
  498. /**
  499. * sdw_read() - Read a SDW Slave register
  500. * @slave: SDW Slave
  501. * @addr: Register address
  502. */
  503. int sdw_read(struct sdw_slave *slave, u32 addr)
  504. {
  505. u8 buf;
  506. int ret;
  507. ret = sdw_nread(slave, addr, 1, &buf);
  508. if (ret < 0)
  509. return ret;
  510. return buf;
  511. }
  512. EXPORT_SYMBOL(sdw_read);
  513. /**
  514. * sdw_write() - Write a SDW Slave register
  515. * @slave: SDW Slave
  516. * @addr: Register address
  517. * @value: Register value
  518. */
  519. int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
  520. {
  521. return sdw_nwrite(slave, addr, 1, &value);
  522. }
  523. EXPORT_SYMBOL(sdw_write);
  524. /*
  525. * SDW alert handling
  526. */
  527. /* called with bus_lock held */
  528. static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
  529. {
  530. struct sdw_slave *slave;
  531. list_for_each_entry(slave, &bus->slaves, node) {
  532. if (slave->dev_num == i)
  533. return slave;
  534. }
  535. return NULL;
  536. }
  537. int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
  538. {
  539. if (slave->id.mfg_id != id.mfg_id ||
  540. slave->id.part_id != id.part_id ||
  541. slave->id.class_id != id.class_id ||
  542. (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
  543. slave->id.unique_id != id.unique_id))
  544. return -ENODEV;
  545. return 0;
  546. }
  547. EXPORT_SYMBOL(sdw_compare_devid);
  548. /* called with bus_lock held */
  549. static int sdw_get_device_num(struct sdw_slave *slave)
  550. {
  551. int bit;
  552. if (slave->bus->dev_num_ida_min) {
  553. bit = ida_alloc_range(&sdw_peripheral_ida,
  554. slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
  555. GFP_KERNEL);
  556. if (bit < 0)
  557. goto err;
  558. } else {
  559. bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
  560. if (bit == SDW_MAX_DEVICES) {
  561. bit = -ENODEV;
  562. goto err;
  563. }
  564. }
  565. /*
  566. * Do not update dev_num in the Slave data structure here;
  567. * update it only after programming the device number succeeds
  568. */
  569. set_bit(bit, slave->bus->assigned);
  570. err:
  571. return bit;
  572. }
  573. static int sdw_assign_device_num(struct sdw_slave *slave)
  574. {
  575. struct sdw_bus *bus = slave->bus;
  576. int ret, dev_num;
  577. bool new_device = false;
  578. /* check first if device number is assigned, if so reuse that */
  579. if (!slave->dev_num) {
  580. if (!slave->dev_num_sticky) {
  581. mutex_lock(&slave->bus->bus_lock);
  582. dev_num = sdw_get_device_num(slave);
  583. mutex_unlock(&slave->bus->bus_lock);
  584. if (dev_num < 0) {
  585. dev_err(bus->dev, "Get dev_num failed: %d\n",
  586. dev_num);
  587. return dev_num;
  588. }
  589. slave->dev_num = dev_num;
  590. slave->dev_num_sticky = dev_num;
  591. new_device = true;
  592. } else {
  593. slave->dev_num = slave->dev_num_sticky;
  594. }
  595. }
  596. if (!new_device)
  597. dev_dbg(bus->dev,
  598. "Slave already registered, reusing dev_num:%d\n",
  599. slave->dev_num);
  600. /* Clear the slave->dev_num to transfer message on device 0 */
  601. dev_num = slave->dev_num;
  602. slave->dev_num = 0;
  603. ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
  604. if (ret < 0) {
  605. dev_err(bus->dev, "Program device_num %d failed: %d\n",
  606. dev_num, ret);
  607. return ret;
  608. }
  609. /* After xfer of msg, restore dev_num */
  610. slave->dev_num = slave->dev_num_sticky;
  611. return 0;
  612. }
  613. void sdw_extract_slave_id(struct sdw_bus *bus,
  614. u64 addr, struct sdw_slave_id *id)
  615. {
  616. dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
  617. id->sdw_version = SDW_VERSION(addr);
  618. id->unique_id = SDW_UNIQUE_ID(addr);
  619. id->mfg_id = SDW_MFG_ID(addr);
  620. id->part_id = SDW_PART_ID(addr);
  621. id->class_id = SDW_CLASS_ID(addr);
  622. dev_dbg(bus->dev,
  623. "SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
  624. id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
  625. }
  626. EXPORT_SYMBOL(sdw_extract_slave_id);
  627. static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
  628. {
  629. u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
  630. struct sdw_slave *slave, *_s;
  631. struct sdw_slave_id id;
  632. struct sdw_msg msg;
  633. bool found;
  634. int count = 0, ret;
  635. u64 addr;
  636. *programmed = false;
  637. /* No Slave, so use raw xfer api */
  638. ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
  639. SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
  640. if (ret < 0)
  641. return ret;
  642. do {
  643. ret = sdw_transfer(bus, &msg);
  644. if (ret == -ENODATA) { /* end of device id reads */
  645. dev_dbg(bus->dev, "No more devices to enumerate\n");
  646. ret = 0;
  647. break;
  648. }
  649. if (ret < 0) {
  650. dev_err(bus->dev, "DEVID read fail:%d\n", ret);
  651. break;
  652. }
  653. /*
  654. * Construct the addr and extract. Cast the higher shift
  655. * bits to avoid truncation due to size limit.
  656. */
  657. addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
  658. ((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
  659. ((u64)buf[0] << 40);
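/*
 * The six SCP_DevId registers form a 48-bit identifier: buf[0]
 * (SCP_DevId_0) supplies the most significant byte, which carries the
 * SoundWire version and unique-id fields, and buf[5] supplies the least
 * significant byte (the class id).
 */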
  660. sdw_extract_slave_id(bus, addr, &id);
  661. found = false;
  662. /* Now compare with entries */
  663. list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
  664. if (sdw_compare_devid(slave, id) == 0) {
  665. found = true;
  666. /*
  667. * To prevent skipping state-machine stages don't
  668. * program a device until we've seen it UNATTACH.
  669. * Must return here because no other device on #0
  670. * can be detected until this one has been
  671. * assigned a device ID.
  672. */
  673. if (slave->status != SDW_SLAVE_UNATTACHED)
  674. return 0;
  675. /*
  676. * Assign a new dev_num to this Slave but do not
  677. * mark it present yet. It will be marked present
  678. * only after it reports ATTACHED on the new
  679. * dev_num
  680. */
  681. ret = sdw_assign_device_num(slave);
  682. if (ret < 0) {
  683. dev_err(bus->dev,
  684. "Assign dev_num failed:%d\n",
  685. ret);
  686. return ret;
  687. }
  688. *programmed = true;
  689. break;
  690. }
  691. }
  692. if (!found) {
  693. /* TODO: Park this device in Group 13 */
  694. /*
  695. * add Slave device even if there is no platform
  696. * firmware description. There will be no driver probe
  697. * but the user/integration will be able to see the
  698. * device, enumeration status and device number in sysfs
  699. */
  700. sdw_slave_add(bus, &id, NULL);
  701. dev_err(bus->dev, "Slave Entry not found\n");
  702. }
  703. count++;
  704. /*
  705. * Loop until an error occurs or the retry count is exhausted.
  706. * A device can drop off and rejoin during enumeration,
  707. * so allow up to twice the maximum number of devices.
  708. */
  709. } while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
  710. return ret;
  711. }
  712. static void sdw_modify_slave_status(struct sdw_slave *slave,
  713. enum sdw_slave_status status)
  714. {
  715. struct sdw_bus *bus = slave->bus;
  716. mutex_lock(&bus->bus_lock);
  717. dev_vdbg(bus->dev,
  718. "changing status slave %d status %d new status %d\n",
  719. slave->dev_num, slave->status, status);
  720. if (status == SDW_SLAVE_UNATTACHED) {
  721. dev_dbg(&slave->dev,
  722. "initializing enumeration and init completion for Slave %d\n",
  723. slave->dev_num);
  724. reinit_completion(&slave->enumeration_complete);
  725. reinit_completion(&slave->initialization_complete);
  726. } else if ((status == SDW_SLAVE_ATTACHED) &&
  727. (slave->status == SDW_SLAVE_UNATTACHED)) {
  728. dev_dbg(&slave->dev,
  729. "signaling enumeration completion for Slave %d\n",
  730. slave->dev_num);
  731. complete_all(&slave->enumeration_complete);
  732. }
  733. slave->status = status;
  734. mutex_unlock(&bus->bus_lock);
  735. }
  736. static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
  737. enum sdw_clk_stop_mode mode,
  738. enum sdw_clk_stop_type type)
  739. {
  740. int ret = 0;
  741. mutex_lock(&slave->sdw_dev_lock);
  742. if (slave->probed) {
  743. struct device *dev = &slave->dev;
  744. struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
  745. if (drv->ops && drv->ops->clk_stop)
  746. ret = drv->ops->clk_stop(slave, mode, type);
  747. }
  748. mutex_unlock(&slave->sdw_dev_lock);
  749. return ret;
  750. }
  751. static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
  752. enum sdw_clk_stop_mode mode,
  753. bool prepare)
  754. {
  755. bool wake_en;
  756. u32 val = 0;
  757. int ret;
  758. wake_en = slave->prop.wake_capable;
  759. if (prepare) {
  760. val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
  761. if (mode == SDW_CLK_STOP_MODE1)
  762. val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
  763. if (wake_en)
  764. val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
  765. } else {
  766. ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
  767. if (ret < 0) {
  768. if (ret != -ENODATA)
  769. dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
  770. return ret;
  771. }
  772. val = ret;
  773. val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
  774. }
  775. ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
  776. if (ret < 0 && ret != -ENODATA)
  777. dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
  778. return ret;
  779. }
  780. static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
  781. {
  782. int retry = bus->clk_stop_timeout;
  783. int val;
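/*
 * Each polling iteration below sleeps for 1-1.5 ms, so the total wait is
 * roughly bus->clk_stop_timeout milliseconds before giving up.
 */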
  784. do {
  785. val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
  786. if (val < 0) {
  787. if (val != -ENODATA)
  788. dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
  789. return val;
  790. }
  791. val &= SDW_SCP_STAT_CLK_STP_NF;
  792. if (!val) {
  793. dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
  794. dev_num);
  795. return 0;
  796. }
  797. usleep_range(1000, 1500);
  798. retry--;
  799. } while (retry);
  800. dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
  801. dev_num);
  802. return -ETIMEDOUT;
  803. }
  804. /**
  805. * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
  806. *
  807. * @bus: SDW bus instance
  808. *
  809. * Query Slave for clock stop mode and prepare for that mode.
  810. */
  811. int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
  812. {
  813. bool simple_clk_stop = true;
  814. struct sdw_slave *slave;
  815. bool is_slave = false;
  816. int ret = 0;
  817. /*
  818. * In order to save on transition time, prepare
  819. * each Slave and then wait for all Slave(s) to be
  820. * prepared for clock stop.
  821. * If one of the Slave devices has lost sync and
  822. * replies with Command Ignored/-ENODATA, we continue
  823. * the loop
  824. */
  825. list_for_each_entry(slave, &bus->slaves, node) {
  826. if (!slave->dev_num)
  827. continue;
  828. if (slave->status != SDW_SLAVE_ATTACHED &&
  829. slave->status != SDW_SLAVE_ALERT)
  830. continue;
  831. /* Identify if Slave(s) are available on Bus */
  832. is_slave = true;
  833. ret = sdw_slave_clk_stop_callback(slave,
  834. SDW_CLK_STOP_MODE0,
  835. SDW_CLK_PRE_PREPARE);
  836. if (ret < 0 && ret != -ENODATA) {
  837. dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
  838. return ret;
  839. }
  840. /* Only prepare a Slave device if needed */
  841. if (!slave->prop.simple_clk_stop_capable) {
  842. simple_clk_stop = false;
  843. ret = sdw_slave_clk_stop_prepare(slave,
  844. SDW_CLK_STOP_MODE0,
  845. true);
  846. if (ret < 0 && ret != -ENODATA) {
  847. dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
  848. return ret;
  849. }
  850. }
  851. }
  852. /* Skip remaining clock stop preparation if no Slave is attached */
  853. if (!is_slave)
  854. return 0;
  855. /*
  856. * Don't wait for all Slaves to be ready if they follow the simple
  857. * state machine
  858. */
  859. if (!simple_clk_stop) {
  860. ret = sdw_bus_wait_for_clk_prep_deprep(bus,
  861. SDW_BROADCAST_DEV_NUM);
  862. /*
  863. * if there are no Slave devices present and the reply is
  864. * Command_Ignored/-ENODATA, we don't need to continue with the
  865. * flow and can just return here. The error code is not modified
  866. * and its handling left as an exercise for the caller.
  867. */
  868. if (ret < 0)
  869. return ret;
  870. }
  871. /* Inform slaves that prep is done */
  872. list_for_each_entry(slave, &bus->slaves, node) {
  873. if (!slave->dev_num)
  874. continue;
  875. if (slave->status != SDW_SLAVE_ATTACHED &&
  876. slave->status != SDW_SLAVE_ALERT)
  877. continue;
  878. ret = sdw_slave_clk_stop_callback(slave,
  879. SDW_CLK_STOP_MODE0,
  880. SDW_CLK_POST_PREPARE);
  881. if (ret < 0 && ret != -ENODATA) {
  882. dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
  883. return ret;
  884. }
  885. }
  886. return 0;
  887. }
  888. EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
  889. /**
  890. * sdw_bus_clk_stop: stop bus clock
  891. *
  892. * @bus: SDW bus instance
  893. *
  894. * After preparing the Slaves for clock stop, stop the clock by broadcasting
  895. * write to SCP_CTRL register.
  896. */
  897. int sdw_bus_clk_stop(struct sdw_bus *bus)
  898. {
  899. int ret;
  900. /*
  901. * broadcast clock stop now, attached Slaves will ACK this,
  902. * unattached will ignore
  903. */
  904. ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
  905. SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
  906. if (ret < 0) {
  907. if (ret != -ENODATA)
  908. dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
  909. return ret;
  910. }
  911. return 0;
  912. }
  913. EXPORT_SYMBOL(sdw_bus_clk_stop);
  914. /**
  915. * sdw_bus_exit_clk_stop: Exit clock stop mode
  916. *
  917. * @bus: SDW bus instance
  918. *
  919. * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves
  920. * exiting Clock Stop Mode 1 will be de-prepared after they enumerate
  921. * back on the bus.
  922. */
  923. int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
  924. {
  925. bool simple_clk_stop = true;
  926. struct sdw_slave *slave;
  927. bool is_slave = false;
  928. int ret;
  929. /*
  930. * In order to save on transition time, de-prepare
  931. * each Slave and then wait for all Slave(s) to be
  932. * de-prepared after clock resume.
  933. */
  934. list_for_each_entry(slave, &bus->slaves, node) {
  935. if (!slave->dev_num)
  936. continue;
  937. if (slave->status != SDW_SLAVE_ATTACHED &&
  938. slave->status != SDW_SLAVE_ALERT)
  939. continue;
  940. /* Identify if Slave(s) are available on Bus */
  941. is_slave = true;
  942. ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
  943. SDW_CLK_PRE_DEPREPARE);
  944. if (ret < 0)
  945. dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
  946. /* Only de-prepare a Slave device if needed */
  947. if (!slave->prop.simple_clk_stop_capable) {
  948. simple_clk_stop = false;
  949. ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
  950. false);
  951. if (ret < 0)
  952. dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
  953. }
  954. }
  955. /* Skip remaining clock stop de-preparation if no Slave is attached */
  956. if (!is_slave)
  957. return 0;
  958. /*
  959. * Don't wait for all Slaves to be ready if they follow the simple
  960. * state machine
  961. */
  962. if (!simple_clk_stop) {
  963. ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
  964. if (ret < 0)
  965. dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
  966. }
  967. list_for_each_entry(slave, &bus->slaves, node) {
  968. if (!slave->dev_num)
  969. continue;
  970. if (slave->status != SDW_SLAVE_ATTACHED &&
  971. slave->status != SDW_SLAVE_ALERT)
  972. continue;
  973. ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
  974. SDW_CLK_POST_DEPREPARE);
  975. if (ret < 0)
  976. dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
  977. }
  978. return 0;
  979. }
  980. EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
  981. int sdw_configure_dpn_intr(struct sdw_slave *slave,
  982. int port, bool enable, int mask)
  983. {
  984. u32 addr;
  985. int ret;
  986. u8 val = 0;
  987. if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
  988. dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
  989. enable ? "on" : "off");
  990. mask |= SDW_DPN_INT_TEST_FAIL;
  991. }
  992. addr = SDW_DPN_INTMASK(port);
  993. /* Set/Clear port ready interrupt mask */
  994. if (enable) {
  995. val |= mask;
  996. val |= SDW_DPN_INT_PORT_READY;
  997. } else {
  998. val &= ~(mask);
  999. val &= ~SDW_DPN_INT_PORT_READY;
  1000. }
  1001. ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
  1002. if (ret < 0)
  1003. dev_err(&slave->dev,
  1004. "SDW_DPN_INTMASK write failed:%d\n", val);
  1005. return ret;
  1006. }
  1007. static int sdw_slave_set_frequency(struct sdw_slave *slave)
  1008. {
  1009. u32 mclk_freq = slave->bus->prop.mclk_freq;
  1010. u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
  1011. unsigned int scale;
  1012. u8 scale_index;
  1013. u8 base;
  1014. int ret;
  1015. /*
  1016. * frequency base and scale registers are required for SDCA
  1017. * devices. They may also be used for 1.2+/non-SDCA devices,
  1018. * but we will need a DisCo property to cover this case
  1019. */
  1020. if (!slave->id.class_id)
  1021. return 0;
  1022. if (!mclk_freq) {
  1023. dev_err(&slave->dev,
  1024. "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
  1025. return -EINVAL;
  1026. }
  1027. /*
  1028. * map base frequency using Table 89 of SoundWire 1.2 spec.
  1029. * The order of the tests just follows the specification, this
  1030. * is not a selection between possible values or a search for
  1031. * the best value but just a mapping. Only one case per platform
  1032. * is relevant.
  1033. * Some BIOSes report inconsistent values for mclk_freq, but the
  1034. * root clock is correct, so force mclk_freq to avoid variations.
  1035. */
  1036. if (!(19200000 % mclk_freq)) {
  1037. mclk_freq = 19200000;
  1038. base = SDW_SCP_BASE_CLOCK_19200000_HZ;
  1039. } else if (!(24000000 % mclk_freq)) {
  1040. mclk_freq = 24000000;
  1041. base = SDW_SCP_BASE_CLOCK_24000000_HZ;
  1042. } else if (!(24576000 % mclk_freq)) {
  1043. mclk_freq = 24576000;
  1044. base = SDW_SCP_BASE_CLOCK_24576000_HZ;
  1045. } else if (!(22579200 % mclk_freq)) {
  1046. mclk_freq = 22579200;
  1047. base = SDW_SCP_BASE_CLOCK_22579200_HZ;
  1048. } else if (!(32000000 % mclk_freq)) {
  1049. mclk_freq = 32000000;
  1050. base = SDW_SCP_BASE_CLOCK_32000000_HZ;
  1051. } else {
  1052. dev_err(&slave->dev,
  1053. "Unsupported clock base, mclk %d\n",
  1054. mclk_freq);
  1055. return -EINVAL;
  1056. }
  1057. if (mclk_freq % curr_freq) {
  1058. dev_err(&slave->dev,
  1059. "mclk %d is not multiple of bus curr_freq %d\n",
  1060. mclk_freq, curr_freq);
  1061. return -EINVAL;
  1062. }
  1063. scale = mclk_freq / curr_freq;
  1064. /*
  1065. * map scale to Table 90 of SoundWire 1.2 spec - and check
  1066. * that the scale is a power of two and maximum 64
  1067. */
  1068. scale_index = ilog2(scale);
  1069. if (BIT(scale_index) != scale || scale_index > 6) {
  1070. dev_err(&slave->dev,
  1071. "No match found for scale %d, bus mclk %d curr_freq %d\n",
  1072. scale, mclk_freq, curr_freq);
  1073. return -EINVAL;
  1074. }
  1075. scale_index++;
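/*
 * Worked example (hypothetical numbers): mclk_freq = 9.6 MHz divides
 * 19.2 MHz, so the base is forced to 19.2 MHz; with curr_dr_freq =
 * 9.6 MHz the bus clock curr_freq is 4.8 MHz, scale = 19200000 / 4800000
 * = 4, ilog2(4) = 2, and the value written to the scale registers is 3.
 */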
  1076. ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
  1077. if (ret < 0) {
  1078. dev_err(&slave->dev,
  1079. "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
  1080. return ret;
  1081. }
  1082. /* initialize scale for both banks */
  1083. ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
  1084. if (ret < 0) {
  1085. dev_err(&slave->dev,
  1086. "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
  1087. return ret;
  1088. }
  1089. ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
  1090. if (ret < 0)
  1091. dev_err(&slave->dev,
  1092. "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
  1093. dev_dbg(&slave->dev,
  1094. "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
  1095. base, scale_index, mclk_freq, curr_freq);
  1096. return ret;
  1097. }
  1098. static int sdw_initialize_slave(struct sdw_slave *slave)
  1099. {
  1100. struct sdw_slave_prop *prop = &slave->prop;
  1101. int status;
  1102. int ret;
  1103. u8 val;
  1104. ret = sdw_slave_set_frequency(slave);
  1105. if (ret < 0)
  1106. return ret;
  1107. if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
  1108. /* Clear bus clash interrupt before enabling interrupt mask */
  1109. status = sdw_read_no_pm(slave, SDW_SCP_INT1);
  1110. if (status < 0) {
  1111. dev_err(&slave->dev,
  1112. "SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
  1113. return status;
  1114. }
  1115. if (status & SDW_SCP_INT1_BUS_CLASH) {
  1116. dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
  1117. ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
  1118. if (ret < 0) {
  1119. dev_err(&slave->dev,
  1120. "SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
  1121. return ret;
  1122. }
  1123. }
  1124. }
  1125. if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
  1126. !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
  1127. /* Clear parity interrupt before enabling interrupt mask */
  1128. status = sdw_read_no_pm(slave, SDW_SCP_INT1);
  1129. if (status < 0) {
  1130. dev_err(&slave->dev,
  1131. "SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
  1132. return status;
  1133. }
  1134. if (status & SDW_SCP_INT1_PARITY) {
  1135. dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
  1136. ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
  1137. if (ret < 0) {
  1138. dev_err(&slave->dev,
  1139. "SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
  1140. return ret;
  1141. }
  1142. }
  1143. }
  1144. /*
  1145. * Set SCP_INT1_MASK register, typically bus clash and
  1146. * implementation-defined interrupt mask. The Parity detection
  1147. * may not always be correct on startup, so its use is
  1148. * device-dependent; it might, e.g., only be enabled in
  1149. * steady state after a couple of frames.
  1150. */
  1151. val = slave->prop.scp_int1_mask;
  1152. /* Enable SCP interrupts */
  1153. ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
  1154. if (ret < 0) {
  1155. dev_err(&slave->dev,
  1156. "SDW_SCP_INTMASK1 write failed:%d\n", ret);
  1157. return ret;
  1158. }
  1159. /* No need to continue if DP0 is not present */
  1160. if (!slave->prop.dp0_prop)
  1161. return 0;
  1162. /* Enable DP0 interrupts */
  1163. val = prop->dp0_prop->imp_def_interrupts;
  1164. val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
  1165. ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
  1166. if (ret < 0)
  1167. dev_err(&slave->dev,
  1168. "SDW_DP0_INTMASK read failed:%d\n", ret);
  1169. return ret;
  1170. }
  1171. static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
  1172. {
  1173. u8 clear, impl_int_mask;
  1174. int status, status2, ret, count = 0;
  1175. status = sdw_read_no_pm(slave, SDW_DP0_INT);
  1176. if (status < 0) {
  1177. dev_err(&slave->dev,
  1178. "SDW_DP0_INT read failed:%d\n", status);
  1179. return status;
  1180. }
  1181. do {
  1182. clear = status & ~SDW_DP0_INTERRUPTS;
  1183. if (status & SDW_DP0_INT_TEST_FAIL) {
  1184. dev_err(&slave->dev, "Test fail for port 0\n");
  1185. clear |= SDW_DP0_INT_TEST_FAIL;
  1186. }
  1187. /*
  1188. * Assumption: PORT_READY interrupt will be received only for
  1189. * ports implementing Channel Prepare state machine (CP_SM)
  1190. */
  1191. if (status & SDW_DP0_INT_PORT_READY) {
  1192. complete(&slave->port_ready[0]);
  1193. clear |= SDW_DP0_INT_PORT_READY;
  1194. }
  1195. if (status & SDW_DP0_INT_BRA_FAILURE) {
  1196. dev_err(&slave->dev, "BRA failed\n");
  1197. clear |= SDW_DP0_INT_BRA_FAILURE;
  1198. }
  1199. impl_int_mask = SDW_DP0_INT_IMPDEF1 |
  1200. SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
  1201. if (status & impl_int_mask) {
  1202. clear |= impl_int_mask;
  1203. *slave_status = clear;
  1204. }
  1205. /* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
  1206. ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
  1207. if (ret < 0) {
  1208. dev_err(&slave->dev,
  1209. "SDW_DP0_INT write failed:%d\n", ret);
  1210. return ret;
  1211. }
  1212. /* Read DP0 interrupt again */
  1213. status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
  1214. if (status2 < 0) {
  1215. dev_err(&slave->dev,
  1216. "SDW_DP0_INT read failed:%d\n", status2);
  1217. return status2;
  1218. }
  1219. /* filter to limit loop to interrupts identified in the first status read */
  1220. status &= status2;
  1221. count++;
  1222. /* we can get alerts while processing so keep retrying */
  1223. } while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
  1224. if (count == SDW_READ_INTR_CLEAR_RETRY)
  1225. dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
  1226. return ret;
  1227. }
  1228. static int sdw_handle_port_interrupt(struct sdw_slave *slave,
  1229. int port, u8 *slave_status)
  1230. {
  1231. u8 clear, impl_int_mask;
  1232. int status, status2, ret, count = 0;
  1233. u32 addr;
  1234. if (port == 0)
  1235. return sdw_handle_dp0_interrupt(slave, slave_status);
  1236. addr = SDW_DPN_INT(port);
  1237. status = sdw_read_no_pm(slave, addr);
  1238. if (status < 0) {
  1239. dev_err(&slave->dev,
  1240. "SDW_DPN_INT read failed:%d\n", status);
  1241. return status;
  1242. }
  1243. do {
  1244. clear = status & ~SDW_DPN_INTERRUPTS;
  1245. if (status & SDW_DPN_INT_TEST_FAIL) {
  1246. dev_err(&slave->dev, "Test fail for port:%d\n", port);
  1247. clear |= SDW_DPN_INT_TEST_FAIL;
  1248. }
  1249. /*
  1250. * Assumption: PORT_READY interrupt will be received only
  1251. * for ports implementing CP_SM.
  1252. */
  1253. if (status & SDW_DPN_INT_PORT_READY) {
  1254. complete(&slave->port_ready[port]);
  1255. clear |= SDW_DPN_INT_PORT_READY;
  1256. }
  1257. impl_int_mask = SDW_DPN_INT_IMPDEF1 |
  1258. SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
  1259. if (status & impl_int_mask) {
  1260. clear |= impl_int_mask;
  1261. *slave_status = clear;
  1262. }
  1263. /* clear the interrupt but don't touch reserved fields */
  1264. ret = sdw_write_no_pm(slave, addr, clear);
  1265. if (ret < 0) {
  1266. dev_err(&slave->dev,
  1267. "SDW_DPN_INT write failed:%d\n", ret);
  1268. return ret;
  1269. }
  1270. /* Read DPN interrupt again */
  1271. status2 = sdw_read_no_pm(slave, addr);
  1272. if (status2 < 0) {
  1273. dev_err(&slave->dev,
  1274. "SDW_DPN_INT read failed:%d\n", status2);
  1275. return status2;
  1276. }
  1277. /* filter to limit loop to interrupts identified in the first status read */
  1278. status &= status2;
  1279. count++;
  1280. /* we can get alerts while processing so keep retrying */
  1281. } while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
  1282. if (count == SDW_READ_INTR_CLEAR_RETRY)
  1283. dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");
  1284. return ret;
  1285. }
  1286. static int sdw_handle_slave_alerts(struct sdw_slave *slave)
  1287. {
  1288. struct sdw_slave_intr_status slave_intr;
  1289. u8 clear = 0, bit, port_status[15] = {0};
  1290. int port_num, stat, ret, count = 0;
  1291. unsigned long port;
  1292. bool slave_notify;
  1293. u8 sdca_cascade = 0;
  1294. u8 buf, buf2[2], _buf, _buf2[2];
  1295. bool parity_check;
  1296. bool parity_quirk;
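/*
 * buf mirrors SCP_Int1, buf2[0]/buf2[1] mirror SCP_IntStat2/IntStat3;
 * _buf/_buf2 hold the re-read values used to detect interrupts that
 * arrived while the first batch was being serviced.
 */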
  1297. sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
  1298. ret = pm_runtime_get_sync(&slave->dev);
  1299. if (ret < 0 && ret != -EACCES) {
  1300. dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
  1301. pm_runtime_put_noidle(&slave->dev);
  1302. return ret;
  1303. }
  1304. /* Read Intstat 1, Intstat 2 and Intstat 3 registers */
  1305. ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
  1306. if (ret < 0) {
  1307. dev_err(&slave->dev,
  1308. "SDW_SCP_INT1 read failed:%d\n", ret);
  1309. goto io_err;
  1310. }
  1311. buf = ret;
  1312. ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
  1313. if (ret < 0) {
  1314. dev_err(&slave->dev,
  1315. "SDW_SCP_INT2/3 read failed:%d\n", ret);
  1316. goto io_err;
  1317. }
  1318. if (slave->prop.is_sdca) {
  1319. ret = sdw_read_no_pm(slave, SDW_DP0_INT);
  1320. if (ret < 0) {
  1321. dev_err(&slave->dev,
  1322. "SDW_DP0_INT read failed:%d\n", ret);
  1323. goto io_err;
  1324. }
  1325. sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
  1326. }
  1327. do {
  1328. slave_notify = false;
  1329. /*
  1330. * Check parity, bus clash and Slave (impl defined)
  1331. * interrupt
  1332. */
  1333. if (buf & SDW_SCP_INT1_PARITY) {
  1334. parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
  1335. parity_quirk = !slave->first_interrupt_done &&
  1336. (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
  1337. if (parity_check && !parity_quirk)
  1338. dev_err(&slave->dev, "Parity error detected\n");
  1339. clear |= SDW_SCP_INT1_PARITY;
  1340. }
  1341. if (buf & SDW_SCP_INT1_BUS_CLASH) {
  1342. if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
  1343. dev_err(&slave->dev, "Bus clash detected\n");
  1344. clear |= SDW_SCP_INT1_BUS_CLASH;
  1345. }
  1346. /*
  1347. * Bus clash or parity errors detected here are unlikely to be
  1348. * recoverable.
  1349. * TODO: In such a scenario, reset the bus. Make this configurable
  1350. * via sysfs property with bus reset being the default.
  1351. */
  1352. if (buf & SDW_SCP_INT1_IMPL_DEF) {
  1353. if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
  1354. dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
  1355. slave_notify = true;
  1356. }
  1357. clear |= SDW_SCP_INT1_IMPL_DEF;
  1358. }
  1359. /* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
  1360. if (sdca_cascade)
  1361. slave_notify = true;
  1362. /* Check port 0 - 3 interrupts */
  1363. port = buf & SDW_SCP_INT1_PORT0_3;
  1364. /* To get port number corresponding to bits, shift it */
  1365. port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
  1366. for_each_set_bit(bit, &port, 8) {
  1367. sdw_handle_port_interrupt(slave, bit,
  1368. &port_status[bit]);
  1369. }
  1370. /* Check if cascade 2 interrupt is present */
  1371. if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
  1372. port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
  1373. for_each_set_bit(bit, &port, 8) {
  1374. /* scp2 ports start from 4 */
  1375. port_num = bit + 4;
  1376. sdw_handle_port_interrupt(slave,
  1377. port_num,
  1378. &port_status[port_num]);
  1379. }
  1380. }
  1381. /* now check last cascade */
  1382. if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
  1383. port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
  1384. for_each_set_bit(bit, &port, 8) {
  1385. /* scp3 ports start from 11 */
  1386. port_num = bit + 11;
  1387. sdw_handle_port_interrupt(slave,
  1388. port_num,
  1389. &port_status[port_num]);
  1390. }
  1391. }
  1392. /* Update the Slave driver */
  1393. if (slave_notify) {
  1394. mutex_lock(&slave->sdw_dev_lock);
  1395. if (slave->probed) {
  1396. struct device *dev = &slave->dev;
  1397. struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
  1398. if (drv->ops && drv->ops->interrupt_callback) {
  1399. slave_intr.sdca_cascade = sdca_cascade;
  1400. slave_intr.control_port = clear;
  1401. memcpy(slave_intr.port, &port_status,
  1402. sizeof(slave_intr.port));
  1403. drv->ops->interrupt_callback(slave, &slave_intr);
  1404. }
  1405. }
  1406. mutex_unlock(&slave->sdw_dev_lock);
  1407. }
  1408. /* Ack interrupt */
  1409. ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
  1410. if (ret < 0) {
  1411. dev_err(&slave->dev,
  1412. "SDW_SCP_INT1 write failed:%d\n", ret);
  1413. goto io_err;
  1414. }
  1415. /* at this point all initial interrupt sources were handled */
  1416. slave->first_interrupt_done = true;
  1417. /*
  1418. * Read status again to ensure no new interrupts arrived
  1419. * while servicing interrupts.
  1420. */
  1421. ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
  1422. if (ret < 0) {
  1423. dev_err(&slave->dev,
  1424. "SDW_SCP_INT1 recheck read failed:%d\n", ret);
  1425. goto io_err;
  1426. }
  1427. _buf = ret;
  1428. ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, _buf2);
  1429. if (ret < 0) {
  1430. dev_err(&slave->dev,
  1431. "SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
  1432. goto io_err;
  1433. }
  1434. if (slave->prop.is_sdca) {
  1435. ret = sdw_read_no_pm(slave, SDW_DP0_INT);
  1436. if (ret < 0) {
  1437. dev_err(&slave->dev,
  1438. "SDW_DP0_INT recheck read failed:%d\n", ret);
  1439. goto io_err;
  1440. }
  1441. sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
  1442. }
  1443. /*
  1444. * Make sure no interrupts are pending, but filter to limit loop
  1445. * to interrupts identified in the first status read
  1446. */
  1447. buf &= _buf;
  1448. buf2[0] &= _buf2[0];
  1449. buf2[1] &= _buf2[1];
  1450. stat = buf || buf2[0] || buf2[1] || sdca_cascade;
  1451. /*
  1452. * Exit loop if Slave is continuously in ALERT state even
  1453. * after servicing the interrupt multiple times.
  1454. */
  1455. count++;
  1456. /* we can get alerts while processing so keep retrying */
  1457. } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
  1458. if (count == SDW_READ_INTR_CLEAR_RETRY)
  1459. dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
  1460. io_err:
  1461. pm_runtime_mark_last_busy(&slave->dev);
  1462. pm_runtime_put_autosuspend(&slave->dev);
  1463. return ret;
  1464. }
  1465. static int sdw_update_slave_status(struct sdw_slave *slave,
  1466. enum sdw_slave_status status)
  1467. {
  1468. int ret = 0;
  1469. mutex_lock(&slave->sdw_dev_lock);
  1470. if (slave->probed) {
  1471. struct device *dev = &slave->dev;
  1472. struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
  1473. if (drv->ops && drv->ops->update_status)
  1474. ret = drv->ops->update_status(slave, status);
  1475. }
  1476. mutex_unlock(&slave->sdw_dev_lock);
  1477. return ret;
  1478. }
  1479. /**
  1480. * sdw_handle_slave_status() - Handle Slave status
  1481. * @bus: SDW bus instance
  1482. * @status: Status for all Slave(s)
  1483. */
  1484. int sdw_handle_slave_status(struct sdw_bus *bus,
  1485. enum sdw_slave_status status[])
  1486. {
  1487. enum sdw_slave_status prev_status;
  1488. struct sdw_slave *slave;
  1489. bool attached_initializing, id_programmed;
  1490. int i, ret = 0;
  1491. /* first check if any Slaves fell off the bus */
  1492. for (i = 1; i <= SDW_MAX_DEVICES; i++) {
  1493. mutex_lock(&bus->bus_lock);
  1494. if (test_bit(i, bus->assigned) == false) {
  1495. mutex_unlock(&bus->bus_lock);
  1496. continue;
  1497. }
  1498. mutex_unlock(&bus->bus_lock);
  1499. slave = sdw_get_slave(bus, i);
  1500. if (!slave)
  1501. continue;
  1502. if (status[i] == SDW_SLAVE_UNATTACHED &&
  1503. slave->status != SDW_SLAVE_UNATTACHED) {
  1504. dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
  1505. i, slave->status);
  1506. sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
  1507. /* Ensure driver knows that peripheral unattached */
  1508. ret = sdw_update_slave_status(slave, status[i]);
  1509. if (ret < 0)
  1510. dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
  1511. }
  1512. }
  1513. if (status[0] == SDW_SLAVE_ATTACHED) {
  1514. dev_dbg(bus->dev, "Slave attached, programming device number\n");
  1515. /*
  1516. * Programming a device number will have side effects,
  1517. * so we deal with other devices at a later time.
  1518. * This relies on those devices reporting ATTACHED, which will
  1519. * trigger another call to this function. This will only
  1520. * happen if at least one device ID was programmed.
  1521. * Error returns from sdw_program_device_num() are currently
  1522. * ignored because there's no useful recovery that can be done.
  1523. * Returning the error here could result in the current status
  1524. * of other devices not being handled, because if no device IDs
  1525. * were programmed there's nothing to guarantee a status change
  1526. * to trigger another call to this function.
  1527. */
  1528. sdw_program_device_num(bus, &id_programmed);
  1529. if (id_programmed)
  1530. return 0;
  1531. }
  1532. /* Continue to check other slave statuses */
  1533. for (i = 1; i <= SDW_MAX_DEVICES; i++) {
  1534. mutex_lock(&bus->bus_lock);
  1535. if (test_bit(i, bus->assigned) == false) {
  1536. mutex_unlock(&bus->bus_lock);
  1537. continue;
  1538. }
  1539. mutex_unlock(&bus->bus_lock);
  1540. slave = sdw_get_slave(bus, i);
  1541. if (!slave)
  1542. continue;
  1543. attached_initializing = false;
  1544. switch (status[i]) {
  1545. case SDW_SLAVE_UNATTACHED:
  1546. if (slave->status == SDW_SLAVE_UNATTACHED)
  1547. break;
  1548. dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
  1549. i, slave->status);
  1550. sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
  1551. break;
  1552. case SDW_SLAVE_ALERT:
  1553. ret = sdw_handle_slave_alerts(slave);
  1554. if (ret < 0)
  1555. dev_err(&slave->dev,
  1556. "Slave %d alert handling failed: %d\n",
  1557. i, ret);
  1558. break;
  1559. case SDW_SLAVE_ATTACHED:
  1560. if (slave->status == SDW_SLAVE_ATTACHED)
  1561. break;
  1562. prev_status = slave->status;
  1563. sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
  1564. if (prev_status == SDW_SLAVE_ALERT)
  1565. break;
  1566. attached_initializing = true;
  1567. ret = sdw_initialize_slave(slave);
  1568. if (ret < 0)
  1569. dev_err(&slave->dev,
  1570. "Slave %d initialization failed: %d\n",
  1571. i, ret);
  1572. break;
  1573. default:
  1574. dev_err(&slave->dev, "Invalid slave %d status:%d\n",
  1575. i, status[i]);
  1576. break;
  1577. }
  1578. ret = sdw_update_slave_status(slave, status[i]);
  1579. if (ret < 0)
  1580. dev_err(&slave->dev,
  1581. "Update Slave status failed:%d\n", ret);
  1582. if (attached_initializing) {
  1583. dev_dbg(&slave->dev,
  1584. "signaling initialization completion for Slave %d\n",
  1585. slave->dev_num);
  1586. complete_all(&slave->initialization_complete);
  1587. /*
  1588. * If the manager became pm_runtime active, the peripherals will be
  1589. * restarted and attach, but their pm_runtime status may remain
  1590. * suspended. If the 'update_slave_status' callback initiates
  1591. * any sort of deferred processing, this processing would not be
  1592. * cancelled on pm_runtime suspend.
  1593. * To avoid such zombie states, we queue a request to resume.
  1594. * This would be a no-op in case the peripheral was being resumed
  1595. * by e.g. the ALSA/ASoC framework.
  1596. */
  1597. pm_request_resume(&slave->dev);
  1598. }
  1599. }
  1600. return ret;
  1601. }
  1602. EXPORT_SYMBOL(sdw_handle_slave_status);
  1603. void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
  1604. {
  1605. struct sdw_slave *slave;
  1606. int i;
  1607. /* Check all non-zero devices */
  1608. for (i = 1; i <= SDW_MAX_DEVICES; i++) {
  1609. mutex_lock(&bus->bus_lock);
  1610. if (test_bit(i, bus->assigned) == false) {
  1611. mutex_unlock(&bus->bus_lock);
  1612. continue;
  1613. }
  1614. mutex_unlock(&bus->bus_lock);
  1615. slave = sdw_get_slave(bus, i);
  1616. if (!slave)
  1617. continue;
  1618. if (slave->status != SDW_SLAVE_UNATTACHED) {
  1619. sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
  1620. slave->first_interrupt_done = false;
  1621. sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
  1622. }
  1623. /* keep track of request, used in pm_runtime resume */
  1624. slave->unattach_request = request;
  1625. }
  1626. }
  1627. EXPORT_SYMBOL(sdw_clear_slave_status);