// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/mei_cl_bus.h>

#include "mei_dev.h"
#include "client.h"

#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)

/**
 * __mei_cl_send - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);

	rets = mei_cl_write(cl, cb);

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @mode: io mode
 * @vtag: virtual tag
 * @timeout: recv timeout, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {
		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)),
					msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	r_length = min_t(size_t, length, cb->buf_idx);
	memcpy(buf, cb->buf.data, r_length);
	rets = r_length;
	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
/**
 * mei_cldev_send_vtag - me device send with vtag (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
			    size_t length, u8 vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);

/**
 * mei_cldev_recv_vtag - client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
			    u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);

/**
 * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * -EAGAIN if function will block.
 * * < 0 on other error
 */
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
				     size_t length, u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);

/**
 * mei_cldev_send - me device send (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
	return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);

/**
 * mei_cldev_recv - client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);

/**
 * mei_cldev_recv_nonblock - non block client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 *         -EAGAIN if function will block.
 */
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
				size_t length)
{
	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
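
/*
 * Example (illustrative sketch only, not part of this driver): a MEI bus
 * client driver doing a blocking command/response exchange with the
 * exported helpers above. The opcode, the reply size and the function
 * name my_cl_xfer() are hypothetical.
 *
 *	static int my_cl_xfer(struct mei_cl_device *cldev)
 *	{
 *		u8 cmd = 0x01;
 *		u8 reply[64];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send(cldev, &cmd, sizeof(cmd));
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_recv(cldev, reply, sizeof(reply));
 *		if (ret < 0)
 *			return ret;
 *
 *		return 0;
 *	}
 */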
/**
 * mei_cl_bus_rx_work - dispatch rx event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_rx_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;
	struct mei_device *bus;

	cldev = container_of(work, struct mei_cl_device, rx_work);

	bus = cldev->bus;

	if (cldev->rx_cb)
		cldev->rx_cb(cldev);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	mutex_unlock(&bus->device_lock);
}

/**
 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_notif_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;

	cldev = container_of(work, struct mei_cl_device, notif_work);

	if (cldev->notif_cb)
		cldev->notif_cb(cldev);
}

/**
 * mei_cl_bus_notify_event - schedule notify cb on bus client
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->notif_cb)
		return false;

	if (!cl->notify_ev)
		return false;

	schedule_work(&cldev->notif_work);

	cl->notify_ev = false;

	return true;
}

/**
 * mei_cl_bus_rx_event - schedule rx event
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->rx_cb)
		return false;

	schedule_work(&cldev->rx_work);

	return true;
}

/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client device
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	if (cldev->rx_cb)
		return -EALREADY;

	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	if (ret && ret != -EBUSY) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
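
/*
 * Example (illustrative sketch, hypothetical names my_rx_cb()/my_probe()):
 * registering an Rx callback from a client driver's probe path. The callback
 * typically drains the pending data with mei_cldev_recv(); the next read is
 * re-armed afterwards by mei_cl_bus_rx_work() above.
 *
 *	static void my_rx_cb(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[64];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv(cldev, buf, sizeof(buf));
 *		if (ret < 0)
 *			dev_err(&cldev->dev, "rx failed: %zd\n", ret);
 *	}
 *
 *	static int my_probe(struct mei_cl_device *cldev,
 *			    const struct mei_cl_device_id *id)
 *	{
 *		int ret = mei_cldev_enable(cldev);
 *
 *		if (ret)
 *			return ret;
 *		return mei_cldev_register_rx_cb(cldev, my_rx_cb);
 *	}
 */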
/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client device
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;

	if (cldev->notif_cb)
		return -EALREADY;

	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	mutex_lock(&bus->device_lock);
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);

/**
 * mei_cldev_get_drvdata - driver data getter
 *
 * @cldev: mei client device
 *
 * Return: driver private data
 */
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
	return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);

/**
 * mei_cldev_set_drvdata - driver data setter
 *
 * @cldev: mei client device
 * @data: data to store
 */
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
	dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
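
/*
 * Example (illustrative sketch): stashing per-device driver state in probe
 * and retrieving it elsewhere. struct my_state is a hypothetical type.
 *
 *	struct my_state {
 *		struct mei_cl_device *cldev;
 *	};
 *
 *	in probe:
 *		struct my_state *st;
 *
 *		st = devm_kzalloc(&cldev->dev, sizeof(*st), GFP_KERNEL);
 *		if (!st)
 *			return -ENOMEM;
 *		st->cldev = cldev;
 *		mei_cldev_set_drvdata(cldev, st);
 *
 *	elsewhere:
 *		struct my_state *st = mei_cldev_get_drvdata(cldev);
 */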
/**
 * mei_cldev_uuid - return uuid of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client uuid
 */
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
	return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);

/**
 * mei_cldev_ver - return protocol version of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client protocol version
 */
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
	return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);

/**
 * mei_cldev_enabled - check whether the device is enabled
 *
 * @cldev: mei client device
 *
 * Return: true if me client is initialized and connected
 */
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
	return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);

/**
 * mei_cl_bus_module_get - acquire module of the underlying
 *    hw driver.
 *
 * @cldev: mei client device
 *
 * Return: true on success; false if the module was removed.
 */
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
	return try_module_get(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_module_put - release the underlying hw module.
 *
 * @cldev: mei client device
 */
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
	module_put(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_vtag - get bus vtag entry wrapper
 *	The tag for bus client is always first.
 *
 * @cl: host client
 *
 * Return: bus vtag or NULL
 */
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
	return list_first_entry_or_null(&cl->vtag_map,
					struct mei_cl_vtag, list);
}

/**
 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
 *
 * @cldev: me client device
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if memory allocation failed
 */
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	/*
	 * Bail out if the client does not support vtags
	 * or has already allocated one
	 */
	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
		return 0;

	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
	if (IS_ERR(cl_vtag))
		return -ENOMEM;

	list_add_tail(&cl_vtag->list, &cl->vtag_map);

	return 0;
}

/**
 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
 *
 * @cldev: me client device
 */
static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = mei_cl_bus_vtag(cl);
	if (!cl_vtag)
		return;

	list_del(&cl_vtag->list);
	kfree(cl_vtag);
}
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);

int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
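
/*
 * Example (illustrative sketch, hypothetical buffer id and size): mapping a
 * firmware-shared DMA buffer before use and unmapping it on teardown. The
 * requested size must be a multiple of MEI_FW_PAGE_SIZE.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, 0x1, MEI_FW_PAGE_SIZE);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	use the buffer at vaddr, then:
 *
 *	mei_cldev_dma_unmap(cldev);
 */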
/**
 * mei_cldev_enable - enable me client device
 *     create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		mei_cl_bus_vtag_free(cldev);
	}

out:
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *  callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}

/**
 * mei_cldev_disable - disable me client device
 *     disconnect from the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
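
/*
 * Example (illustrative sketch, hypothetical my_remove()): a client driver
 * normally pairs mei_cldev_enable() in its probe callback (see the sketch
 * after mei_cldev_register_rx_cb() above) with mei_cldev_disable() in its
 * remove callback.
 *
 *	static void my_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */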
/**
 * mei_cl_device_find - find matching entry in the driver id table
 *
 * @cldev: me client device
 * @cldrv: me client driver
 *
 * Return: id on success; NULL if no id is matching
 */
static const
struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
					     const struct mei_cl_driver *cldrv)
{
	const struct mei_cl_device_id *id;
	const uuid_le *uuid;
	u8 version;
	bool match;

	uuid = mei_me_cl_uuid(cldev->me_cl);
	version = mei_me_cl_ver(cldev->me_cl);

	id = cldrv->id_table;
	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
		if (!uuid_le_cmp(*uuid, id->uuid)) {
			match = true;

			if (cldev->name[0])
				if (strncmp(cldev->name, id->name,
					    sizeof(id->name)))
					match = false;

			if (id->version != MEI_CL_VERSION_ANY)
				if (id->version != version)
					match = false;

			if (match)
				return id;
		}

		id++;
	}

	return NULL;
}

/**
 * mei_cl_device_match - device match function
 *
 * @dev: device
 * @drv: driver
 *
 * Return: 1 if matching device was found; 0 otherwise
 */
static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
	const struct mei_cl_device_id *found_id;

	if (!cldev)
		return 0;

	if (!cldev->do_match)
		return 0;

	if (!cldrv || !cldrv->id_table)
		return 0;

	found_id = mei_cl_device_find(cldev, cldrv);
	if (found_id)
		return 1;

	return 0;
}
/**
 * mei_cl_device_probe - bus probe function
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_probe(struct device *dev)
{
	struct mei_cl_device *cldev;
	struct mei_cl_driver *cldrv;
	const struct mei_cl_device_id *id;
	int ret;

	cldev = to_mei_cl_device(dev);
	cldrv = to_mei_cl_driver(dev->driver);

	if (!cldev)
		return 0;

	if (!cldrv || !cldrv->probe)
		return -ENODEV;

	id = mei_cl_device_find(cldev, cldrv);
	if (!id)
		return -ENODEV;

	if (!mei_cl_bus_module_get(cldev)) {
		dev_err(&cldev->dev, "get hw module failed");
		return -ENODEV;
	}

	ret = cldrv->probe(cldev, id);
	if (ret) {
		mei_cl_bus_module_put(cldev);
		return ret;
	}

	__module_get(THIS_MODULE);
	return 0;
}

/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	if (cldrv->remove)
		cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);

	return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t version_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
			 cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);

	return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);

static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
			  char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 fixed = mei_me_cl_fixed(cldev->me_cl);

	return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);

static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	bool vt = mei_me_cl_vt(cldev->me_cl);

	return sprintf(buf, "%d", vt);
}
static DEVICE_ATTR_RO(vtag);

static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);

	return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);

static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success; -ENOMEM when add_uevent_var fails
 */
static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}

static struct bus_type mei_cl_bus_type = {
	.name		= "mei",
	.dev_groups	= mei_cldev_groups,
	.match		= mei_cl_device_match,
	.probe		= mei_cl_device_probe,
	.remove		= mei_cl_device_remove,
	.uevent		= mei_cl_device_uevent,
};
static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
	if (bus)
		get_device(bus->dev);

	return bus;
}

static void mei_dev_bus_put(struct mei_device *bus)
{
	if (bus)
		put_device(bus->dev);
}

static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	if (!cldev)
		return;

	mei_cl_flush_queues(cldev->cl, NULL);
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	kfree(cldev->cl);
	kfree(cldev);
}

static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};

/**
 * mei_cl_bus_set_name - set device name for me client device
 *  <controller>-<client device>
 *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
 *
 * @cldev: me client device
 */
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
	dev_set_name(&cldev->dev, "%s-%pUl",
		     dev_name(cldev->bus->dev),
		     mei_me_cl_uuid(cldev->me_cl));
}

/**
 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus = &mei_cl_bus_type;
	cldev->dev.type = &mei_cl_device_type;
	cldev->bus = mei_dev_bus_get(bus);
	cldev->me_cl = mei_me_cl_get(me_cl);
	cldev->cl = cl;
	mei_cl_bus_set_name(cldev);
	cldev->is_added = 0;
	INIT_LIST_HEAD(&cldev->bus_list);

	return cldev;
}
/**
 * mei_cl_bus_dev_setup - setup me client device
 *    run fix up routines and set the device name
 *
 * @bus: mei device
 * @cldev: me client device
 *
 * Return: true if the device is eligible for enumeration
 */
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
				 struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
	mei_cl_bus_dev_fixup(cldev);

	/* the device name can change during fix up */
	if (cldev->do_match)
		mei_cl_bus_set_name(cldev);

	return cldev->do_match == 1;
}

/**
 * mei_cl_bus_dev_add - add me client device
 *
 * @cldev: me client device
 *
 * Return: 0 on success; < 0 on failure
 */
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
	int ret;

	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
		mei_me_cl_uuid(cldev->me_cl),
		mei_me_cl_ver(cldev->me_cl));
	ret = device_add(&cldev->dev);
	if (!ret)
		cldev->is_added = 1;

	return ret;
}

/**
 * mei_cl_bus_dev_stop - stop the driver
 *
 * @cldev: me client device
 */
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
	if (cldev->is_added)
		device_release_driver(&cldev->dev);
}

/**
 * mei_cl_bus_dev_destroy - destroy me client device object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{
	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	put_device(&cldev->dev);
}
/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}

/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}

/**
 * mei_cl_bus_dev_init - allocate and initialize an mei client device
 *		based on me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);
}

/**
 * mei_cl_bus_rescan - scan me clients list and create devices
 *		for eligible clients
 *
 * @bus: mei device
 */
static void mei_cl_bus_rescan(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *n;
	struct mei_me_client *me_cl;

	mutex_lock(&bus->cl_bus_lock);

	down_read(&bus->me_clients_rwsem);
	list_for_each_entry(me_cl, &bus->me_clients, list)
		mei_cl_bus_dev_init(bus, me_cl);
	up_read(&bus->me_clients_rwsem);

	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
		if (!mei_me_cl_is_active(cldev->me_cl)) {
			mei_cl_bus_remove_device(cldev);
			continue;
		}

		if (cldev->is_added)
			continue;

		if (mei_cl_bus_dev_setup(bus, cldev))
			mei_cl_bus_dev_add(cldev);
		else {
			list_del_init(&cldev->bus_list);
			put_device(&cldev->dev);
		}
	}
	mutex_unlock(&bus->cl_bus_lock);

	dev_dbg(bus->dev, "rescan end");
}

void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
				struct module *owner)
{
	int err;

	cldrv->driver.name = cldrv->name;
	cldrv->driver.owner = owner;
	cldrv->driver.bus = &mei_cl_bus_type;

	err = driver_register(&cldrv->driver);
	if (err)
		return err;

	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);

	return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);

void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
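
/*
 * Example (illustrative sketch, hypothetical UUID and names): a minimal
 * client driver registering against this bus via the module_mei_cl_driver()
 * helper from <linux/mei_cl_bus.h>, with probe/remove callbacks such as the
 * my_probe()/my_remove() sketched earlier.
 *
 *	#define MY_UUID UUID_LE(0x12345678, 0x9abc, 0xdef0, \
 *				0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0)
 *
 *	static const struct mei_cl_device_id my_tbl[] = {
 *		{ .uuid = MY_UUID, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, my_tbl);
 *
 *	static struct mei_cl_driver my_driver = {
 *		.id_table = my_tbl,
 *		.name = "my_mei_client",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_mei_cl_driver(my_driver);
 */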
int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}

void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}