interface.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus interface code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/delay.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000

#define GB_INTERFACE_DEVICE_ID_BAD 0xff

#define GB_INTERFACE_AUTOSUSPEND_MS 3000

/* Time required for interface to enter standby before disabling REFCLK */
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20

/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL 0

/* DME attributes */
/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
#define DME_T_TST_SRC_INCREMENT 0x4083

#define DME_DDBL1_MANUFACTURERID 0x5003
#define DME_DDBL1_PRODUCTID 0x5004

#define DME_TOSHIBA_GMP_VID 0x6000
#define DME_TOSHIBA_GMP_PID 0x6001
#define DME_TOSHIBA_GMP_SN0 0x6002
#define DME_TOSHIBA_GMP_SN1 0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS 0x6101

/* DDBL1 Manufacturer and Product ids */
#define TOSHIBA_DMID 0x0126
#define TOSHIBA_ES2_BRIDGE_DPID 0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001
#define TOSHIBA_ES3_GBPHY_DPID 0x1002

static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until we have defined
	 * standard GMP attributes.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}

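/*
 * Read and cache the interface's DDBL1 and GMP identifiers. Known ES2
 * bridges are flagged with the corresponding quirks before the
 * vendor-specific attributes are read.
 */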
static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}

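/*
 * Allocate a device id for the interface and have the SVC set up a route
 * between the AP and the interface.
 */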
static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}

/* Locking: Caller holds the interface mutex. */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}

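/*
 * Handle a mailbox event reported by the SVC. A Greybus mailbox value
 * signals either a legacy mode switch or completion of a pending mode
 * switch; anything else disables and deactivates the interface.
 */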
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

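/*
 * Carry out a requested mode switch: prepare the control connection,
 * disable the interface, wait for the mailbox event and then re-enable
 * (re-enumerate) the interface if it is still active.
 */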
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}

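/* Request a mode switch to be carried out by the mode-switch worker. */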
int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
 * clear it after reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
				GB_INTERFACE_QUIRK_FORCED_DISABLE |
				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}

/* interface sysfs attributes */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");

static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);

static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);

static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};

static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static const struct attribute_group interface_unipro_group = {
	.is_visible = interface_unipro_is_visible,
	.attrs = interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible = interface_greybus_is_visible,
	.attrs = interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible = interface_power_is_visible,
	.attrs = interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs = interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};

static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}

#ifdef CONFIG_PM
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name = "greybus_interface",
	.release = gb_interface_release,
	.pm = &gb_interface_pm_ops,
};

/*
 * A Greybus module represents a user-replaceable component on a GMP
 * phone. An interface is the physical connection on that module. A
 * module may have more than one interface.
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The position of the interface within the Endo is encoded in the
 * "interface_id" argument.
 *
 * Returns a pointer to the new interface or a null pointer if a
 * failure occurs due to memory exhaustion.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;		/* XXX refcount? */
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}

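/* Helpers for toggling SVC-controlled interface resources. */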
static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}

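/*
 * Power up and activate an interface: enable V_SYS, the reference clock
 * and UniPro, issue the SVC activate operation, and then read the DME
 * attributes and create a route to the AP.
 */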
static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}

/*
 * At present, we assume a UniPro-only module to be a Greybus module that
 * failed to send its mailbox poke. There is some reason to believe that this
 * is because of a bug in the ES3 bootrom.
 *
 * FIXME: Check if this is a Toshiba bridge before retrying?
 */
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}

/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}

/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_destroy_bundles;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}

/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}

/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));
	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		fallthrough;
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}

/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);
		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}