// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>
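
/* Operation timeouts, in milliseconds */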
#define SVC_INTF_EJECT_TIMEOUT		9000
#define SVC_INTF_ACTIVATE_TIMEOUT	6000
#define SVC_INTF_RESUME_TIMEOUT		3000
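
/*
 * Some incoming SVC requests are processed in two stages: the receive
 * handler only validates the request and queues a gb_svc_deferred_request
 * on svc->wq; the actual work happens later from process context.
 */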
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);

static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

/*
 * FIXME: This is a hack; we need to do this "right" and clean the
 * interface up properly, not just forcibly yank the thing out of the
 * system and hope for the best. But for now, people want their modules
 * to come out without having to throw the thing to the ground or get
 * out a screwdriver.
 */
static ssize_t intf_eject_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	unsigned short intf_id;
	int ret;

	ret = kstrtou16(buf, 10, &intf_id);
	if (ret < 0)
		return ret;

	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);

	ret = gb_svc_intf_eject(svc, intf_id);
	if (ret < 0)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(intf_eject);

static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%s\n",
		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
}

static ssize_t watchdog_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	int retval;
	bool user_request;

	retval = strtobool(buf, &user_request);
	if (retval)
		return retval;

	if (user_request)
		retval = gb_svc_watchdog_enable(svc);
	else
		retval = gb_svc_watchdog_disable(svc);
	if (retval)
		return retval;

	return len;
}
static DEVICE_ATTR_RW(watchdog);

static ssize_t watchdog_action_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
		return sprintf(buf, "panic\n");
	else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
		return sprintf(buf, "reset\n");

	return -EINVAL;
}

static ssize_t watchdog_action_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (sysfs_streq(buf, "panic"))
		svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
	else if (sysfs_streq(buf, "reset"))
		svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
	else
		return -EINVAL;

	return len;
}
static DEVICE_ATTR_RW(watchdog_action);
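
/*
 * Power-monitor (pwrmon) helpers: query the SVC for the number of
 * monitored power rails, their names, and per-rail samples.
 */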
static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}

static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}

static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);

int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in the SVC is long, so we need
	 * to increase the timeout so that the operation does not return too
	 * soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}

int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_vsys_request request;
	struct gb_svc_intf_vsys_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_VSYS_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_refclk_request request;
	struct gb_svc_intf_refclk_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_unipro_request request;
	struct gb_svc_intf_unipro_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_ACTIVATE,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}

int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}

int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}

int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     u8 cport_flags)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	request.tc = 0;		/* TC0 */
	request.flags = cport_flags;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}

/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}
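
/*
 * Request a UniPro link power-mode change for an interface. TX and RX
 * directions are configured independently; optional L2 timer settings
 * can be passed for the local and remote ends of the link.
 */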
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}

int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}

static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}

static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
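
/*
 * Create pwrmon/<rail-name>/{voltage_now,current_now,power_now} debugfs
 * entries for each power rail reported by the SVC.
 */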
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}

static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}

static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_deregister_svc;
	}

	/*
	 * FIXME: This is a temporary hack to reconfigure the link at HELLO
	 * (which abuses the deferred request processing mechanism).
	 */
	ret = gb_svc_queue_deferred_request(op);
	if (ret)
		goto err_destroy_watchdog;

	gb_svc_debugfs_init(svc);

	return 0;

err_destroy_watchdog:
	gb_svc_watchdog_destroy(svc);
err_deregister_svc:
	device_del(&svc->dev);

	return ret;
}
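
/*
 * Interface ids within a module are consecutive, starting at the module
 * id (which is the id of the module's primary interface).
 */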
static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
						    u8 intf_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;

	list_for_each_entry(module, &hd->modules, hd_node) {
		module_id = module->module_id;
		num_interfaces = module->num_interfaces;

		if (intf_id >= module_id &&
		    intf_id < module_id + num_interfaces) {
			return module->interfaces[intf_id - module_id];
		}
	}

	return NULL;
}

static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;

	list_for_each_entry(module, &hd->modules, hd_node) {
		if (module->module_id == module_id)
			return module;
	}

	return NULL;
}

static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);
	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}

static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}

static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}

static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}

static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}
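
/*
 * The receive handlers below run in the connection's request-handling
 * path; most of them just validate the payload and defer the real work
 * to the svc workqueue.
 */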
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_module_inserted_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_inserted_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially),
	 * and the code below enforces that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, except the two above.
	 *
	 * Incoming requests are guaranteed to be serialized, so we don't
	 * need to protect 'state' against races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}

static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};

struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	put_device(&svc->dev);
	return NULL;
}

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}

static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}

void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}