// SPDX-License-Identifier: GPL-2.0+
/*
 * Surface Book (gen. 2 and later) detachment system (DTX) driver.
 *
 * Provides a user-space interface to properly handle clipboard/tablet
 * (containing screen and processor) detachment from the base of the device
 * (containing the keyboard and optionally a discrete GPU). Allows user-space
 * to acknowledge (to speed things up), abort (e.g. in case the dGPU is still
 * in use), or request detachment.
 *
 * Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
 */
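
/*
 * Illustrative user-space sketch (not part of the driver): how a client might
 * consume the event interface implemented below. It assumes the UAPI
 * definitions from <linux/surface_aggregator/dtx.h> (struct sdtx_event and
 * the SDTX_IOCTL_* constants) and the device node created by the miscdevice
 * registered in this file (nodename "surface/dtx", i.e. /dev/surface/dtx).
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <sys/ioctl.h>
 *   #include <unistd.h>
 *   #include <linux/surface_aggregator/dtx.h>
 *
 *   int main(void)
 *   {
 *           unsigned char buf[512];
 *           ssize_t n, off;
 *           int fd;
 *
 *           fd = open("/dev/surface/dtx", O_RDWR);
 *           if (fd < 0)
 *                   return 1;
 *
 *           // Events are only queued for clients that opted in.
 *           if (ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE))
 *                   return 1;
 *
 *           // Each read() yields zero or more packed events: a struct
 *           // sdtx_event header followed by 'length' payload bytes.
 *           while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *                   for (off = 0; off + sizeof(struct sdtx_event) <= n;) {
 *                           struct sdtx_event *evt = (void *)(buf + off);
 *
 *                           printf("event %u, payload %u bytes\n",
 *                                  evt->code, evt->length);
 *                           off += sizeof(*evt) + evt->length;
 *                   }
 *           }
 *
 *           close(fd);
 *           return 0;
 *   }
 */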

#include <linux/fs.h>
#include <linux/input.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include <linux/surface_aggregator/dtx.h>

/* -- SSAM interface. ------------------------------------------------------- */

enum sam_event_cid_bas {
        SAM_EVENT_CID_DTX_CONNECTION = 0x0c,
        SAM_EVENT_CID_DTX_REQUEST = 0x0e,
        SAM_EVENT_CID_DTX_CANCEL = 0x0f,
        SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,
};

enum ssam_bas_base_state {
        SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
        SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
        SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
};

enum ssam_bas_latch_status {
        SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
        SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
        SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
        SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
        SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
};

enum ssam_bas_cancel_reason {
        SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
        SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
        SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
        SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
        SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
};

struct ssam_bas_base_info {
        u8 state;
        u8 base_id;
} __packed;

static_assert(sizeof(struct ssam_bas_base_info) == 2);

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x06,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x07,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x08,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x09,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x0a,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x0b,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x0c,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x0d,
        .instance_id = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
        .target_category = SSAM_SSH_TC_BAS,
        .target_id = 0x01,
        .command_id = 0x11,
        .instance_id = 0x00,
});
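
/*
 * Note (descriptive, added for clarity): the SSAM_DEFINE_SYNC_REQUEST_N()/_R()
 * macros above generate small synchronous request helpers. Based on how they
 * are invoked through ssam_retry() further down, they are assumed to expand to
 * roughly the following prototypes (illustrative only; see
 * <linux/surface_aggregator/controller.h> for the authoritative definitions):
 *
 *   static int ssam_bas_latch_lock(struct ssam_controller *ctrl);
 *   static int ssam_bas_get_base(struct ssam_controller *ctrl,
 *                                struct ssam_bas_base_info *ret);
 *   static int ssam_bas_get_device_mode(struct ssam_controller *ctrl, u8 *ret);
 */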

/* -- Main structures. ------------------------------------------------------ */

enum sdtx_device_state {
        SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
        SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
        SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
        SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
};

struct sdtx_device {
        struct kref kref;
        struct rw_semaphore lock;        /* Guards device and controller reference. */

        struct device *dev;
        struct ssam_controller *ctrl;
        unsigned long flags;

        struct miscdevice mdev;
        wait_queue_head_t waitq;
        struct mutex write_lock;         /* Guards order of events/notifications. */
        struct rw_semaphore client_lock; /* Guards client list. */
        struct list_head client_list;

        struct delayed_work state_work;
        struct {
                struct ssam_bas_base_info base;
                u8 device_mode;
                u8 latch_status;
        } state;

        struct delayed_work mode_work;
        struct input_dev *mode_switch;

        struct ssam_event_notifier notif;
};

enum sdtx_client_state {
        SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
};

struct sdtx_client {
        struct sdtx_device *ddev;
        struct list_head node;
        unsigned long flags;

        struct fasync_struct *fasync;

        struct mutex read_lock;          /* Guards FIFO buffer read access. */
        DECLARE_KFIFO(buffer, u8, 512);
};

static void __sdtx_device_release(struct kref *kref)
{
        struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);

        mutex_destroy(&ddev->write_lock);
        kfree(ddev);
}

static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
{
        if (ddev)
                kref_get(&ddev->kref);

        return ddev;
}

static void sdtx_device_put(struct sdtx_device *ddev)
{
        if (ddev)
                kref_put(&ddev->kref, __sdtx_device_release);
}

/* -- Firmware value translations. ------------------------------------------ */

static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
{
        switch (state) {
        case SSAM_BAS_BASE_STATE_ATTACHED:
                return SDTX_BASE_ATTACHED;

        case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
                return SDTX_BASE_DETACHED;

        case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
                return SDTX_DETACH_NOT_FEASIBLE;

        default:
                dev_err(ddev->dev, "unknown base state: %#04x\n", state);
                return SDTX_UNKNOWN(state);
        }
}

static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
{
        switch (status) {
        case SSAM_BAS_LATCH_STATUS_CLOSED:
                return SDTX_LATCH_CLOSED;

        case SSAM_BAS_LATCH_STATUS_OPENED:
                return SDTX_LATCH_OPENED;

        case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
                return SDTX_ERR_FAILED_TO_OPEN;

        case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
                return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

        case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
                return SDTX_ERR_FAILED_TO_CLOSE;

        default:
                dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
                return SDTX_UNKNOWN(status);
        }
}

static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
{
        switch (reason) {
        case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
                return SDTX_DETACH_NOT_FEASIBLE;

        case SSAM_BAS_CANCEL_REASON_TIMEOUT:
                return SDTX_DETACH_TIMEDOUT;

        case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
                return SDTX_ERR_FAILED_TO_OPEN;

        case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
                return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

        case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
                return SDTX_ERR_FAILED_TO_CLOSE;

        default:
                dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
                return SDTX_UNKNOWN(reason);
        }
}

/* -- IOCTLs. --------------------------------------------------------------- */

static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
                                    struct sdtx_base_info __user *buf)
{
        struct ssam_bas_base_info raw;
        struct sdtx_base_info info;
        int status;

        lockdep_assert_held_read(&ddev->lock);

        status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
        if (status < 0)
                return status;

        info.state = sdtx_translate_base_state(ddev, raw.state);
        info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);

        if (copy_to_user(buf, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
{
        u8 mode;
        int status;

        lockdep_assert_held_read(&ddev->lock);

        status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
        if (status < 0)
                return status;

        return put_user(mode, buf);
}

static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
{
        u8 latch;
        int status;

        lockdep_assert_held_read(&ddev->lock);

        status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
        if (status < 0)
                return status;

        return put_user(sdtx_translate_latch_status(ddev, latch), buf);
}

static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
{
        struct sdtx_device *ddev = client->ddev;

        lockdep_assert_held_read(&ddev->lock);

        switch (cmd) {
        case SDTX_IOCTL_EVENTS_ENABLE:
                set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
                return 0;

        case SDTX_IOCTL_EVENTS_DISABLE:
                clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
                return 0;

        case SDTX_IOCTL_LATCH_LOCK:
                return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);

        case SDTX_IOCTL_LATCH_UNLOCK:
                return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);

        case SDTX_IOCTL_LATCH_REQUEST:
                return ssam_retry(ssam_bas_latch_request, ddev->ctrl);

        case SDTX_IOCTL_LATCH_CONFIRM:
                return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);

        case SDTX_IOCTL_LATCH_HEARTBEAT:
                return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);

        case SDTX_IOCTL_LATCH_CANCEL:
                return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);

        case SDTX_IOCTL_GET_BASE_INFO:
                return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);

        case SDTX_IOCTL_GET_DEVICE_MODE:
                return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);

        case SDTX_IOCTL_GET_LATCH_STATUS:
                return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);

        default:
                return -EINVAL;
        }
}

static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct sdtx_client *client = file->private_data;
        long status;

        if (down_read_killable(&client->ddev->lock))
                return -ERESTARTSYS;

        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
                up_read(&client->ddev->lock);
                return -ENODEV;
        }

        status = __surface_dtx_ioctl(client, cmd, arg);

        up_read(&client->ddev->lock);
        return status;
}
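
/*
 * Illustrative user-space sketch (not part of the driver): querying the
 * current state via the ioctls dispatched above. Assumes the UAPI constants
 * and struct sdtx_base_info from <linux/surface_aggregator/dtx.h> and the
 * /dev/surface/dtx node registered below.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <sys/ioctl.h>
 *   #include <unistd.h>
 *   #include <linux/surface_aggregator/dtx.h>
 *
 *   int main(void)
 *   {
 *           struct sdtx_base_info base;
 *           __u16 mode, latch;
 *           int fd = open("/dev/surface/dtx", O_RDWR);
 *
 *           if (fd < 0)
 *                   return 1;
 *
 *           // Each query returns 0 on success and fills the passed buffer.
 *           if (ioctl(fd, SDTX_IOCTL_GET_BASE_INFO, &base) ||
 *               ioctl(fd, SDTX_IOCTL_GET_DEVICE_MODE, &mode) ||
 *               ioctl(fd, SDTX_IOCTL_GET_LATCH_STATUS, &latch))
 *                   return 1;
 *
 *           printf("base state: %#06x, base id: %#06x\n", base.state, base.base_id);
 *           printf("device mode: %#06x, latch status: %#06x\n", mode, latch);
 *
 *           close(fd);
 *           return 0;
 *   }
 */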

/* -- File operations. ------------------------------------------------------ */

static int surface_dtx_open(struct inode *inode, struct file *file)
{
        struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
        struct sdtx_client *client;

        /* Initialize client. */
        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;

        client->ddev = sdtx_device_get(ddev);

        INIT_LIST_HEAD(&client->node);

        mutex_init(&client->read_lock);
        INIT_KFIFO(client->buffer);

        file->private_data = client;

        /* Attach client. */
        down_write(&ddev->client_lock);

        /*
         * Do not add a new client if the device has been shut down. Note that
         * it's enough to hold the client_lock here as, during shutdown, we
         * only acquire that lock and remove clients after marking the device
         * as shut down.
         */
        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
                up_write(&ddev->client_lock);
                mutex_destroy(&client->read_lock);
                sdtx_device_put(client->ddev);
                kfree(client);
                return -ENODEV;
        }

        list_add_tail(&client->node, &ddev->client_list);
        up_write(&ddev->client_lock);

        stream_open(inode, file);
        return 0;
}

static int surface_dtx_release(struct inode *inode, struct file *file)
{
        struct sdtx_client *client = file->private_data;

        /* Detach client. */
        down_write(&client->ddev->client_lock);
        list_del(&client->node);
        up_write(&client->ddev->client_lock);

        /* Free client. */
        sdtx_device_put(client->ddev);
        mutex_destroy(&client->read_lock);
        kfree(client);

        return 0;
}

static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
        struct sdtx_client *client = file->private_data;
        struct sdtx_device *ddev = client->ddev;
        unsigned int copied;
        int status = 0;

        if (down_read_killable(&ddev->lock))
                return -ERESTARTSYS;

        /* Make sure we're not shut down. */
        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
                up_read(&ddev->lock);
                return -ENODEV;
        }

        do {
                /* Check availability, wait if necessary. */
                if (kfifo_is_empty(&client->buffer)) {
                        up_read(&ddev->lock);

                        if (file->f_flags & O_NONBLOCK)
                                return -EAGAIN;

                        status = wait_event_interruptible(ddev->waitq,
                                                          !kfifo_is_empty(&client->buffer) ||
                                                          test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
                                                                   &ddev->flags));
                        if (status < 0)
                                return status;

                        if (down_read_killable(&ddev->lock))
                                return -ERESTARTSYS;

                        /* Need to check that we're not shut down again. */
                        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
                                up_read(&ddev->lock);
                                return -ENODEV;
                        }
                }

                /* Try to read from FIFO. */
                if (mutex_lock_interruptible(&client->read_lock)) {
                        up_read(&ddev->lock);
                        return -ERESTARTSYS;
                }

                status = kfifo_to_user(&client->buffer, buf, count, &copied);
                mutex_unlock(&client->read_lock);

                if (status < 0) {
                        up_read(&ddev->lock);
                        return status;
                }

                /* We might not have gotten anything, check this here. */
                if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
                        up_read(&ddev->lock);
                        return -EAGAIN;
                }
        } while (copied == 0);

        up_read(&ddev->lock);
        return copied;
}

static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
{
        struct sdtx_client *client = file->private_data;
        __poll_t events = 0;

        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
                return EPOLLHUP | EPOLLERR;

        poll_wait(file, &client->ddev->waitq, pt);

        if (!kfifo_is_empty(&client->buffer))
                events |= EPOLLIN | EPOLLRDNORM;

        return events;
}

static int surface_dtx_fasync(int fd, struct file *file, int on)
{
        struct sdtx_client *client = file->private_data;

        return fasync_helper(fd, file, on, &client->fasync);
}

static const struct file_operations surface_dtx_fops = {
        .owner          = THIS_MODULE,
        .open           = surface_dtx_open,
        .release        = surface_dtx_release,
        .read           = surface_dtx_read,
        .poll           = surface_dtx_poll,
        .fasync         = surface_dtx_fasync,
        .unlocked_ioctl = surface_dtx_ioctl,
        .compat_ioctl   = surface_dtx_ioctl,
        .llseek         = no_llseek,
};

/* -- Event handling/forwarding. -------------------------------------------- */

/*
 * The device operation mode is not immediately updated on the EC when the
 * base has been connected, i.e. querying the device mode inside the
 * connection event callback yields an outdated value. Thus, we can only
 * determine the new tablet-mode switch and device mode values after some
 * time.
 *
 * These delays have been chosen experimentally: we first delay on connect
 * events, then check and validate the device mode against the base state,
 * and, if the mode is invalid, delay again by the "recheck" delay.
 */
#define SDTX_DEVICE_MODE_DELAY_CONNECT  msecs_to_jiffies(100)
#define SDTX_DEVICE_MODE_DELAY_RECHECK  msecs_to_jiffies(100)

struct sdtx_status_event {
        struct sdtx_event e;
        __u16 v;
} __packed;

struct sdtx_base_info_event {
        struct sdtx_event e;
        struct sdtx_base_info v;
} __packed;

union sdtx_generic_event {
        struct sdtx_event common;
        struct sdtx_status_event status;
        struct sdtx_base_info_event base;
};
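
/*
 * Note (descriptive, added for clarity): events are forwarded to clients as a
 * byte stream. Each event written into a client's FIFO in sdtx_push_event()
 * consists of a packed struct sdtx_event header immediately followed by
 * 'length' payload bytes, and is returned verbatim by surface_dtx_read(). The
 * union above merely provides storage large enough for any of these variants.
 */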

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);

/* Must be executed with ddev->write_lock held. */
static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
{
        const size_t len = sizeof(struct sdtx_event) + evt->length;
        struct sdtx_client *client;

        lockdep_assert_held(&ddev->write_lock);

        down_read(&ddev->client_lock);
        list_for_each_entry(client, &ddev->client_list, node) {
                if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
                        continue;

                if (likely(kfifo_avail(&client->buffer) >= len))
                        kfifo_in(&client->buffer, (const u8 *)evt, len);
                else
                        dev_warn(ddev->dev, "event buffer overrun\n");

                kill_fasync(&client->fasync, SIGIO, POLL_IN);
        }
        up_read(&ddev->client_lock);

        wake_up_interruptible(&ddev->waitq);
}

static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
        struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
        union sdtx_generic_event event;
        size_t len;

        /* Validate event payload length. */
        switch (in->command_id) {
        case SAM_EVENT_CID_DTX_CONNECTION:
                len = 2 * sizeof(u8);
                break;

        case SAM_EVENT_CID_DTX_REQUEST:
                len = 0;
                break;

        case SAM_EVENT_CID_DTX_CANCEL:
                len = sizeof(u8);
                break;

        case SAM_EVENT_CID_DTX_LATCH_STATUS:
                len = sizeof(u8);
                break;

        default:
                return 0;
        }

        if (in->length != len) {
                dev_err(ddev->dev,
                        "unexpected payload size for event %#04x: got %u, expected %zu\n",
                        in->command_id, in->length, len);
                return 0;
        }

        mutex_lock(&ddev->write_lock);

        /* Translate event. */
        switch (in->command_id) {
        case SAM_EVENT_CID_DTX_CONNECTION:
                clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);

                /* If state has not changed: do not send new event. */
                if (ddev->state.base.state == in->data[0] &&
                    ddev->state.base.base_id == in->data[1])
                        goto out;

                ddev->state.base.state = in->data[0];
                ddev->state.base.base_id = in->data[1];

                event.base.e.length = sizeof(struct sdtx_base_info);
                event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
                event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
                event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
                break;

        case SAM_EVENT_CID_DTX_REQUEST:
                event.common.code = SDTX_EVENT_REQUEST;
                event.common.length = 0;
                break;

        case SAM_EVENT_CID_DTX_CANCEL:
                event.status.e.length = sizeof(u16);
                event.status.e.code = SDTX_EVENT_CANCEL;
                event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
                break;

        case SAM_EVENT_CID_DTX_LATCH_STATUS:
                clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

                /* If state has not changed: do not send new event. */
                if (ddev->state.latch_status == in->data[0])
                        goto out;

                ddev->state.latch_status = in->data[0];

                event.status.e.length = sizeof(u16);
                event.status.e.code = SDTX_EVENT_LATCH_STATUS;
                event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
                break;
        }

        sdtx_push_event(ddev, &event.common);

        /* Update device mode on base connection change. */
        if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
                unsigned long delay;

                delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
                sdtx_update_device_mode(ddev, delay);
        }

out:
        mutex_unlock(&ddev->write_lock);
        return SSAM_NOTIF_HANDLED;
}

/* -- State update functions. ----------------------------------------------- */

static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
{
        return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
                (mode == SDTX_DEVICE_MODE_TABLET)) ||
               ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
                (mode != SDTX_DEVICE_MODE_TABLET));
}
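
/*
 * Example (descriptive, added for clarity): a base state of
 * SSAM_BAS_BASE_STATE_ATTACHED combined with a reported device mode of
 * SDTX_DEVICE_MODE_TABLET is considered inconsistent, as is a detached base
 * with a non-tablet mode. Both indicate that the EC has not finished updating
 * the device mode yet, so callers retry the query after a short delay.
 */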

static void sdtx_device_mode_workfn(struct work_struct *work)
{
        struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
        struct sdtx_status_event event;
        struct ssam_bas_base_info base;
        int status, tablet;
        u8 mode;

        /* Get operation mode. */
        status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
        if (status) {
                dev_err(ddev->dev, "failed to get device mode: %d\n", status);
                return;
        }

        /* Get base info. */
        status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
        if (status) {
                dev_err(ddev->dev, "failed to get base info: %d\n", status);
                return;
        }

        /*
         * In some cases (specifically when attaching the base), the device
         * mode isn't updated right away. Thus we check if the device mode
         * makes sense for the given base state and try again later if it
         * doesn't.
         */
        if (sdtx_device_mode_invalid(mode, base.state)) {
                dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
                sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
                return;
        }

        mutex_lock(&ddev->write_lock);
        clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);

        /* Avoid sending duplicate device-mode events. */
        if (ddev->state.device_mode == mode) {
                mutex_unlock(&ddev->write_lock);
                return;
        }

        ddev->state.device_mode = mode;

        event.e.length = sizeof(u16);
        event.e.code = SDTX_EVENT_DEVICE_MODE;
        event.v = mode;

        sdtx_push_event(ddev, &event.e);

        /* Send SW_TABLET_MODE event. */
        tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
        input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
        input_sync(ddev->mode_switch);

        mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
{
        schedule_delayed_work(&ddev->mode_work, delay);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
                                            struct ssam_bas_base_info info)
{
        struct sdtx_base_info_event event;

        lockdep_assert_held(&ddev->write_lock);

        /* Prevent duplicate events. */
        if (ddev->state.base.state == info.state &&
            ddev->state.base.base_id == info.base_id)
                return;

        ddev->state.base = info;

        event.e.length = sizeof(struct sdtx_base_info);
        event.e.code = SDTX_EVENT_BASE_CONNECTION;
        event.v.state = sdtx_translate_base_state(ddev, info.state);
        event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);

        sdtx_push_event(ddev, &event.e);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
{
        struct sdtx_status_event event;
        int tablet;

        /*
         * Note: This function must be called after updating the base state
         * via __sdtx_device_state_update_base(), as we rely on the updated
         * base state value in the validity check below.
         */

        lockdep_assert_held(&ddev->write_lock);

        if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
                dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
                sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
                return;
        }

        /* Prevent duplicate events. */
        if (ddev->state.device_mode == mode)
                return;

        ddev->state.device_mode = mode;

        /* Send event. */
        event.e.length = sizeof(u16);
        event.e.code = SDTX_EVENT_DEVICE_MODE;
        event.v = mode;

        sdtx_push_event(ddev, &event.e);

        /* Send SW_TABLET_MODE event. */
        tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
        input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
        input_sync(ddev->mode_switch);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
{
        struct sdtx_status_event event;

        lockdep_assert_held(&ddev->write_lock);

        /* Prevent duplicate events. */
        if (ddev->state.latch_status == status)
                return;

        ddev->state.latch_status = status;

        /* Send event. */
        event.e.length = sizeof(u16);
        event.e.code = SDTX_EVENT_LATCH_STATUS;
        event.v = sdtx_translate_latch_status(ddev, status);

        sdtx_push_event(ddev, &event.e);
}

static void sdtx_device_state_workfn(struct work_struct *work)
{
        struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
        struct ssam_bas_base_info base;
        u8 mode, latch;
        int status;

        /* Mark everything as dirty. */
        set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
        set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
        set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

        /*
         * Ensure that the state gets marked as dirty before continuing to
         * query it. Necessary to ensure that clear_bit() calls in
         * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
         * bits if an event is received while updating the state here.
         */
        smp_mb__after_atomic();

        status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
        if (status) {
                dev_err(ddev->dev, "failed to get base state: %d\n", status);
                return;
        }

        status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
        if (status) {
                dev_err(ddev->dev, "failed to get device mode: %d\n", status);
                return;
        }

        status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
        if (status) {
                dev_err(ddev->dev, "failed to get latch status: %d\n", status);
                return;
        }

        mutex_lock(&ddev->write_lock);

        /*
         * If the respective dirty-bit has been cleared, an event has been
         * received, updating this state. The queried state may thus be out of
         * date. At this point, we can safely assume that the state provided
         * by the event is either up to date, or we're about to receive
         * another event updating it.
         */

        if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
                __sdtx_device_state_update_base(ddev, base);

        if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
                __sdtx_device_state_update_mode(ddev, mode);

        if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
                __sdtx_device_state_update_latch(ddev, latch);

        mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
{
        schedule_delayed_work(&ddev->state_work, delay);
}

/* -- Common device initialization. ----------------------------------------- */

static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
                            struct ssam_controller *ctrl)
{
        int status, tablet_mode;

        /* Basic initialization. */
        kref_init(&ddev->kref);
        init_rwsem(&ddev->lock);
        ddev->dev = dev;
        ddev->ctrl = ctrl;

        ddev->mdev.minor = MISC_DYNAMIC_MINOR;
        ddev->mdev.name = "surface_dtx";
        ddev->mdev.nodename = "surface/dtx";
        ddev->mdev.fops = &surface_dtx_fops;

        ddev->notif.base.priority = 1;
        ddev->notif.base.fn = sdtx_notifier;
        ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
        ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
        ddev->notif.event.id.instance = 0;
        ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
        ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;

        init_waitqueue_head(&ddev->waitq);
        mutex_init(&ddev->write_lock);
        init_rwsem(&ddev->client_lock);
        INIT_LIST_HEAD(&ddev->client_list);

        INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
        INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);

        /*
         * Get current device state. We want to guarantee that events are only
         * sent when state actually changes. Thus we cannot use special
         * "uninitialized" values, as that would cause problems when manually
         * querying the state in surface_dtx_pm_complete(). I.e. we would not
         * be able to detect state changes there if no change event has been
         * received between driver initialization and first device suspension.
         *
         * Note that we also need to do this before registering the event
         * notifier, as that may access the state values.
         */
        status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
        if (status)
                return status;

        status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
        if (status)
                return status;

        status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
        if (status)
                return status;

        /* Set up tablet mode switch. */
        ddev->mode_switch = input_allocate_device();
        if (!ddev->mode_switch)
                return -ENOMEM;

        ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
        ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
        ddev->mode_switch->id.bustype = BUS_HOST;
        ddev->mode_switch->dev.parent = ddev->dev;

        tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
        input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
        input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);

        status = input_register_device(ddev->mode_switch);
        if (status) {
                input_free_device(ddev->mode_switch);
                return status;
        }

        /* Set up event notifier. */
        status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
        if (status)
                goto err_notif;

        /* Register miscdevice. */
        status = misc_register(&ddev->mdev);
        if (status)
                goto err_mdev;

        /*
         * Update device state in case it has changed between getting the
         * initial mode and registering the event notifier.
         */
        sdtx_update_device_state(ddev, 0);
        return 0;

err_mdev:
        ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
        cancel_delayed_work_sync(&ddev->mode_work);
err_notif:
        input_unregister_device(ddev->mode_switch);
        return status;
}

static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
{
        struct sdtx_device *ddev;
        int status;

        ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
        if (!ddev)
                return ERR_PTR(-ENOMEM);

        status = sdtx_device_init(ddev, dev, ctrl);
        if (status) {
                sdtx_device_put(ddev);
                return ERR_PTR(status);
        }

        return ddev;
}

static void sdtx_device_destroy(struct sdtx_device *ddev)
{
        struct sdtx_client *client;

        /*
         * Mark device as shut-down. Prevent new clients from being added and
         * new operations from being executed.
         */
        set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);

        /* Disable notifiers, prevent new events from arriving. */
        ssam_notifier_unregister(ddev->ctrl, &ddev->notif);

        /* Stop mode_work, prevent access to mode_switch. */
        cancel_delayed_work_sync(&ddev->mode_work);

        /* Stop state_work. */
        cancel_delayed_work_sync(&ddev->state_work);

        /* With mode_work canceled, we can unregister the mode_switch. */
        input_unregister_device(ddev->mode_switch);

        /* Wake up async clients. */
        down_write(&ddev->client_lock);
        list_for_each_entry(client, &ddev->client_list, node) {
                kill_fasync(&client->fasync, SIGIO, POLL_HUP);
        }
        up_write(&ddev->client_lock);

        /* Wake up blocking clients. */
        wake_up_interruptible(&ddev->waitq);

        /*
         * Wait for clients to finish their current operation. After this, the
         * controller and device references are guaranteed to be no longer in
         * use.
         */
        down_write(&ddev->lock);
        ddev->dev = NULL;
        ddev->ctrl = NULL;
        up_write(&ddev->lock);

        /* Finally remove the misc-device. */
        misc_deregister(&ddev->mdev);

        /*
         * We're now guaranteed that surface_dtx_open() won't be called any
         * more, so we can now drop our reference.
         */
        sdtx_device_put(ddev);
}

/* -- PM ops. --------------------------------------------------------------- */

#ifdef CONFIG_PM_SLEEP

static void surface_dtx_pm_complete(struct device *dev)
{
        struct sdtx_device *ddev = dev_get_drvdata(dev);

        /*
         * Normally, the EC will store events while suspended (i.e. in
         * display-off state) and release them when resumed (i.e. transitioned
         * to display-on state). During hibernation, however, the EC will be
         * shut down and does not store events. Furthermore, events might be
         * dropped during prolonged suspension (it is currently unknown how
         * big this event buffer is and how it behaves on overruns).
         *
         * To prevent any problems, we update the device state here. We do
         * this delayed to ensure that any events sent by the EC directly
         * after resuming will be handled first. The delay below has been
         * chosen (experimentally), so that there should be ample time for
         * these events to be handled, before we check and, if necessary,
         * update the state.
         */
        sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
}

static const struct dev_pm_ops surface_dtx_pm_ops = {
        .complete = surface_dtx_pm_complete,
};

#else /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops surface_dtx_pm_ops = {};

#endif /* CONFIG_PM_SLEEP */

/* -- Platform driver. ------------------------------------------------------ */

static int surface_dtx_platform_probe(struct platform_device *pdev)
{
        struct ssam_controller *ctrl;
        struct sdtx_device *ddev;

        /* Link to EC. */
        ctrl = ssam_client_bind(&pdev->dev);
        if (IS_ERR(ctrl))
                return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

        ddev = sdtx_device_create(&pdev->dev, ctrl);
        if (IS_ERR(ddev))
                return PTR_ERR(ddev);

        platform_set_drvdata(pdev, ddev);
        return 0;
}

static int surface_dtx_platform_remove(struct platform_device *pdev)
{
        sdtx_device_destroy(platform_get_drvdata(pdev));
        return 0;
}

static const struct acpi_device_id surface_dtx_acpi_match[] = {
        { "MSHW0133", 0 },
        { },
};
MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);

static struct platform_driver surface_dtx_platform_driver = {
        .probe = surface_dtx_platform_probe,
        .remove = surface_dtx_platform_remove,
        .driver = {
                .name = "surface_dtx_pltf",
                .acpi_match_table = surface_dtx_acpi_match,
                .pm = &surface_dtx_pm_ops,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};

/* -- SSAM device driver. --------------------------------------------------- */

#ifdef CONFIG_SURFACE_AGGREGATOR_BUS

static int surface_dtx_ssam_probe(struct ssam_device *sdev)
{
        struct sdtx_device *ddev;

        ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
        if (IS_ERR(ddev))
                return PTR_ERR(ddev);

        ssam_device_set_drvdata(sdev, ddev);
        return 0;
}

static void surface_dtx_ssam_remove(struct ssam_device *sdev)
{
        sdtx_device_destroy(ssam_device_get_drvdata(sdev));
}

static const struct ssam_device_id surface_dtx_ssam_match[] = {
        { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
        { },
};
MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);

static struct ssam_device_driver surface_dtx_ssam_driver = {
        .probe = surface_dtx_ssam_probe,
        .remove = surface_dtx_ssam_remove,
        .match_table = surface_dtx_ssam_match,
        .driver = {
                .name = "surface_dtx",
                .pm = &surface_dtx_pm_ops,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};

static int ssam_dtx_driver_register(void)
{
        return ssam_device_driver_register(&surface_dtx_ssam_driver);
}

static void ssam_dtx_driver_unregister(void)
{
        ssam_device_driver_unregister(&surface_dtx_ssam_driver);
}

#else /* CONFIG_SURFACE_AGGREGATOR_BUS */

static int ssam_dtx_driver_register(void)
{
        return 0;
}

static void ssam_dtx_driver_unregister(void)
{
}

#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */

/* -- Module setup. --------------------------------------------------------- */

static int __init surface_dtx_init(void)
{
        int status;

        status = ssam_dtx_driver_register();
        if (status)
                return status;

        status = platform_driver_register(&surface_dtx_platform_driver);
        if (status)
                ssam_dtx_driver_unregister();

        return status;
}
module_init(surface_dtx_init);

static void __exit surface_dtx_exit(void)
{
        platform_driver_unregister(&surface_dtx_platform_driver);
        ssam_dtx_driver_unregister();
}
module_exit(surface_dtx_exit);

MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");