control.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Greybus CPort control protocol.
  4. *
  5. * Copyright 2015 Google Inc.
  6. * Copyright 2015 Linaro Ltd.
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/slab.h>
  11. #include <linux/greybus.h>
  12. /* Highest control-protocol version supported */
  13. #define GB_CONTROL_VERSION_MAJOR 0
  14. #define GB_CONTROL_VERSION_MINOR 1
  15. static int gb_control_get_version(struct gb_control *control)
  16. {
  17. struct gb_interface *intf = control->connection->intf;
  18. struct gb_control_version_request request;
  19. struct gb_control_version_response response;
  20. int ret;
  21. request.major = GB_CONTROL_VERSION_MAJOR;
  22. request.minor = GB_CONTROL_VERSION_MINOR;
  23. ret = gb_operation_sync(control->connection,
  24. GB_CONTROL_TYPE_VERSION,
  25. &request, sizeof(request), &response,
  26. sizeof(response));
  27. if (ret) {
  28. dev_err(&intf->dev,
  29. "failed to get control-protocol version: %d\n",
  30. ret);
  31. return ret;
  32. }
  33. if (response.major > request.major) {
  34. dev_err(&intf->dev,
  35. "unsupported major control-protocol version (%u > %u)\n",
  36. response.major, request.major);
  37. return -ENOTSUPP;
  38. }
  39. control->protocol_major = response.major;
  40. control->protocol_minor = response.minor;
  41. dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
  42. response.minor);
  43. return 0;
  44. }
  45. static int gb_control_get_bundle_version(struct gb_control *control,
  46. struct gb_bundle *bundle)
  47. {
  48. struct gb_interface *intf = control->connection->intf;
  49. struct gb_control_bundle_version_request request;
  50. struct gb_control_bundle_version_response response;
  51. int ret;
  52. request.bundle_id = bundle->id;
  53. ret = gb_operation_sync(control->connection,
  54. GB_CONTROL_TYPE_BUNDLE_VERSION,
  55. &request, sizeof(request),
  56. &response, sizeof(response));
  57. if (ret) {
  58. dev_err(&intf->dev,
  59. "failed to get bundle %u class version: %d\n",
  60. bundle->id, ret);
  61. return ret;
  62. }
  63. bundle->class_major = response.major;
  64. bundle->class_minor = response.minor;
  65. dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
  66. response.major, response.minor);
  67. return 0;
  68. }
  69. int gb_control_get_bundle_versions(struct gb_control *control)
  70. {
  71. struct gb_interface *intf = control->connection->intf;
  72. struct gb_bundle *bundle;
  73. int ret;
  74. if (!control->has_bundle_version)
  75. return 0;
  76. list_for_each_entry(bundle, &intf->bundles, links) {
  77. ret = gb_control_get_bundle_version(control, bundle);
  78. if (ret)
  79. return ret;
  80. }
  81. return 0;
  82. }
  83. /* Get Manifest's size from the interface */
  84. int gb_control_get_manifest_size_operation(struct gb_interface *intf)
  85. {
  86. struct gb_control_get_manifest_size_response response;
  87. struct gb_connection *connection = intf->control->connection;
  88. int ret;
  89. ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
  90. NULL, 0, &response, sizeof(response));
  91. if (ret) {
  92. dev_err(&connection->intf->dev,
  93. "failed to get manifest size: %d\n", ret);
  94. return ret;
  95. }
  96. return le16_to_cpu(response.size);
  97. }
  98. /* Reads Manifest from the interface */
  99. int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
  100. size_t size)
  101. {
  102. struct gb_connection *connection = intf->control->connection;
  103. return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
  104. NULL, 0, manifest, size);
  105. }
  106. int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
  107. {
  108. struct gb_control_connected_request request;
  109. request.cport_id = cpu_to_le16(cport_id);
  110. return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
  111. &request, sizeof(request), NULL, 0);
  112. }
  113. int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
  114. {
  115. struct gb_control_disconnected_request request;
  116. request.cport_id = cpu_to_le16(cport_id);
  117. return gb_operation_sync(control->connection,
  118. GB_CONTROL_TYPE_DISCONNECTED, &request,
  119. sizeof(request), NULL, 0);
  120. }
  121. int gb_control_disconnecting_operation(struct gb_control *control,
  122. u16 cport_id)
  123. {
  124. struct gb_control_disconnecting_request *request;
  125. struct gb_operation *operation;
  126. int ret;
  127. operation = gb_operation_create_core(control->connection,
  128. GB_CONTROL_TYPE_DISCONNECTING,
  129. sizeof(*request), 0, 0,
  130. GFP_KERNEL);
  131. if (!operation)
  132. return -ENOMEM;
  133. request = operation->request->payload;
  134. request->cport_id = cpu_to_le16(cport_id);
  135. ret = gb_operation_request_send_sync(operation);
  136. if (ret) {
  137. dev_err(&control->dev, "failed to send disconnecting: %d\n",
  138. ret);
  139. }
  140. gb_operation_put(operation);
  141. return ret;
  142. }
  143. int gb_control_mode_switch_operation(struct gb_control *control)
  144. {
  145. struct gb_operation *operation;
  146. int ret;
  147. operation = gb_operation_create_core(control->connection,
  148. GB_CONTROL_TYPE_MODE_SWITCH,
  149. 0, 0,
  150. GB_OPERATION_FLAG_UNIDIRECTIONAL,
  151. GFP_KERNEL);
  152. if (!operation)
  153. return -ENOMEM;
  154. ret = gb_operation_request_send_sync(operation);
  155. if (ret)
  156. dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
  157. gb_operation_put(operation);
  158. return ret;
  159. }
  160. static int gb_control_bundle_pm_status_map(u8 status)
  161. {
  162. switch (status) {
  163. case GB_CONTROL_BUNDLE_PM_INVAL:
  164. return -EINVAL;
  165. case GB_CONTROL_BUNDLE_PM_BUSY:
  166. return -EBUSY;
  167. case GB_CONTROL_BUNDLE_PM_NA:
  168. return -ENOMSG;
  169. case GB_CONTROL_BUNDLE_PM_FAIL:
  170. default:
  171. return -EREMOTEIO;
  172. }
  173. }
  174. int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
  175. {
  176. struct gb_control_bundle_pm_request request;
  177. struct gb_control_bundle_pm_response response;
  178. int ret;
  179. request.bundle_id = bundle_id;
  180. ret = gb_operation_sync(control->connection,
  181. GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
  182. sizeof(request), &response, sizeof(response));
  183. if (ret) {
  184. dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
  185. bundle_id, ret);
  186. return ret;
  187. }
  188. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  189. dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
  190. bundle_id, response.status);
  191. return gb_control_bundle_pm_status_map(response.status);
  192. }
  193. return 0;
  194. }
  195. int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
  196. {
  197. struct gb_control_bundle_pm_request request;
  198. struct gb_control_bundle_pm_response response;
  199. int ret;
  200. request.bundle_id = bundle_id;
  201. ret = gb_operation_sync(control->connection,
  202. GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
  203. sizeof(request), &response, sizeof(response));
  204. if (ret) {
  205. dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
  206. bundle_id, ret);
  207. return ret;
  208. }
  209. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  210. dev_err(&control->dev, "failed to resume bundle %u: %d\n",
  211. bundle_id, response.status);
  212. return gb_control_bundle_pm_status_map(response.status);
  213. }
  214. return 0;
  215. }
  216. int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
  217. {
  218. struct gb_control_bundle_pm_request request;
  219. struct gb_control_bundle_pm_response response;
  220. int ret;
  221. request.bundle_id = bundle_id;
  222. ret = gb_operation_sync(control->connection,
  223. GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
  224. sizeof(request), &response, sizeof(response));
  225. if (ret) {
  226. dev_err(&control->dev,
  227. "failed to send bundle %u deactivate: %d\n", bundle_id,
  228. ret);
  229. return ret;
  230. }
  231. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  232. dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
  233. bundle_id, response.status);
  234. return gb_control_bundle_pm_status_map(response.status);
  235. }
  236. return 0;
  237. }
  238. int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
  239. {
  240. struct gb_control_bundle_pm_request request;
  241. struct gb_control_bundle_pm_response response;
  242. int ret;
  243. if (!control->has_bundle_activate)
  244. return 0;
  245. request.bundle_id = bundle_id;
  246. ret = gb_operation_sync(control->connection,
  247. GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
  248. sizeof(request), &response, sizeof(response));
  249. if (ret) {
  250. dev_err(&control->dev,
  251. "failed to send bundle %u activate: %d\n", bundle_id,
  252. ret);
  253. return ret;
  254. }
  255. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  256. dev_err(&control->dev, "failed to activate bundle %u: %d\n",
  257. bundle_id, response.status);
  258. return gb_control_bundle_pm_status_map(response.status);
  259. }
  260. return 0;
  261. }
  262. static int gb_control_interface_pm_status_map(u8 status)
  263. {
  264. switch (status) {
  265. case GB_CONTROL_INTF_PM_BUSY:
  266. return -EBUSY;
  267. case GB_CONTROL_INTF_PM_NA:
  268. return -ENOMSG;
  269. default:
  270. return -EREMOTEIO;
  271. }
  272. }
  273. int gb_control_interface_suspend_prepare(struct gb_control *control)
  274. {
  275. struct gb_control_intf_pm_response response;
  276. int ret;
  277. ret = gb_operation_sync(control->connection,
  278. GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
  279. &response, sizeof(response));
  280. if (ret) {
  281. dev_err(&control->dev,
  282. "failed to send interface suspend prepare: %d\n", ret);
  283. return ret;
  284. }
  285. if (response.status != GB_CONTROL_INTF_PM_OK) {
  286. dev_err(&control->dev, "interface error while preparing suspend: %d\n",
  287. response.status);
  288. return gb_control_interface_pm_status_map(response.status);
  289. }
  290. return 0;
  291. }
  292. int gb_control_interface_deactivate_prepare(struct gb_control *control)
  293. {
  294. struct gb_control_intf_pm_response response;
  295. int ret;
  296. ret = gb_operation_sync(control->connection,
  297. GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
  298. 0, &response, sizeof(response));
  299. if (ret) {
  300. dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
  301. ret);
  302. return ret;
  303. }
  304. if (response.status != GB_CONTROL_INTF_PM_OK) {
  305. dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
  306. response.status);
  307. return gb_control_interface_pm_status_map(response.status);
  308. }
  309. return 0;
  310. }
  311. int gb_control_interface_hibernate_abort(struct gb_control *control)
  312. {
  313. struct gb_control_intf_pm_response response;
  314. int ret;
  315. ret = gb_operation_sync(control->connection,
  316. GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
  317. &response, sizeof(response));
  318. if (ret) {
  319. dev_err(&control->dev,
  320. "failed to send interface aborting hibernate: %d\n",
  321. ret);
  322. return ret;
  323. }
  324. if (response.status != GB_CONTROL_INTF_PM_OK) {
  325. dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
  326. response.status);
  327. return gb_control_interface_pm_status_map(response.status);
  328. }
  329. return 0;
  330. }
  331. static ssize_t vendor_string_show(struct device *dev,
  332. struct device_attribute *attr, char *buf)
  333. {
  334. struct gb_control *control = to_gb_control(dev);
  335. return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
  336. }
  337. static DEVICE_ATTR_RO(vendor_string);
  338. static ssize_t product_string_show(struct device *dev,
  339. struct device_attribute *attr, char *buf)
  340. {
  341. struct gb_control *control = to_gb_control(dev);
  342. return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
  343. }
  344. static DEVICE_ATTR_RO(product_string);
/* sysfs attributes exported by every control device. */
static struct attribute *control_attrs[] = {
	&dev_attr_vendor_string.attr,
	&dev_attr_product_string.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(control);	/* defines control_groups for dev.groups */
  351. static void gb_control_release(struct device *dev)
  352. {
  353. struct gb_control *control = to_gb_control(dev);
  354. gb_connection_destroy(control->connection);
  355. kfree(control->vendor_string);
  356. kfree(control->product_string);
  357. kfree(control);
  358. }
/*
 * Device type for Greybus control devices; gb_control_release() runs
 * when the last reference to such a device goes away.
 */
struct device_type greybus_control_type = {
	.name = "greybus_control",
	.release = gb_control_release,
};
  363. struct gb_control *gb_control_create(struct gb_interface *intf)
  364. {
  365. struct gb_connection *connection;
  366. struct gb_control *control;
  367. control = kzalloc(sizeof(*control), GFP_KERNEL);
  368. if (!control)
  369. return ERR_PTR(-ENOMEM);
  370. control->intf = intf;
  371. connection = gb_connection_create_control(intf);
  372. if (IS_ERR(connection)) {
  373. dev_err(&intf->dev,
  374. "failed to create control connection: %ld\n",
  375. PTR_ERR(connection));
  376. kfree(control);
  377. return ERR_CAST(connection);
  378. }
  379. control->connection = connection;
  380. control->dev.parent = &intf->dev;
  381. control->dev.bus = &greybus_bus_type;
  382. control->dev.type = &greybus_control_type;
  383. control->dev.groups = control_groups;
  384. control->dev.dma_mask = intf->dev.dma_mask;
  385. device_initialize(&control->dev);
  386. dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
  387. gb_connection_set_data(control->connection, control);
  388. return control;
  389. }
  390. int gb_control_enable(struct gb_control *control)
  391. {
  392. int ret;
  393. dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
  394. ret = gb_connection_enable_tx(control->connection);
  395. if (ret) {
  396. dev_err(&control->connection->intf->dev,
  397. "failed to enable control connection: %d\n",
  398. ret);
  399. return ret;
  400. }
  401. ret = gb_control_get_version(control);
  402. if (ret)
  403. goto err_disable_connection;
  404. if (control->protocol_major > 0 || control->protocol_minor > 1)
  405. control->has_bundle_version = true;
  406. /* FIXME: use protocol version instead */
  407. if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
  408. control->has_bundle_activate = true;
  409. return 0;
  410. err_disable_connection:
  411. gb_connection_disable(control->connection);
  412. return ret;
  413. }
  414. void gb_control_disable(struct gb_control *control)
  415. {
  416. dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
  417. if (control->intf->disconnected)
  418. gb_connection_disable_forced(control->connection);
  419. else
  420. gb_connection_disable(control->connection);
  421. }
/* Suspend the control connection; always returns 0. */
int gb_control_suspend(struct gb_control *control)
{
	gb_connection_disable(control->connection);

	return 0;
}
  427. int gb_control_resume(struct gb_control *control)
  428. {
  429. int ret;
  430. ret = gb_connection_enable_tx(control->connection);
  431. if (ret) {
  432. dev_err(&control->connection->intf->dev,
  433. "failed to enable control connection: %d\n", ret);
  434. return ret;
  435. }
  436. return 0;
  437. }
  438. int gb_control_add(struct gb_control *control)
  439. {
  440. int ret;
  441. ret = device_add(&control->dev);
  442. if (ret) {
  443. dev_err(&control->dev,
  444. "failed to register control device: %d\n",
  445. ret);
  446. return ret;
  447. }
  448. return 0;
  449. }
/* Unregister the control device, if it was ever registered. */
void gb_control_del(struct gb_control *control)
{
	if (device_is_registered(&control->dev))
		device_del(&control->dev);
}
/* Take a reference on @control and return it. */
struct gb_control *gb_control_get(struct gb_control *control)
{
	get_device(&control->dev);

	return control;
}
/* Drop a reference taken with gb_control_get(). */
void gb_control_put(struct gb_control *control)
{
	put_device(&control->dev);
}
/* Prepare the control connection for an interface mode switch. */
void gb_control_mode_switch_prepare(struct gb_control *control)
{
	gb_connection_mode_switch_prepare(control->connection);
}
/* Complete an interface mode switch on the control connection. */
void gb_control_mode_switch_complete(struct gb_control *control)
{
	gb_connection_mode_switch_complete(control->connection);
}