// SPDX-License-Identifier: GPL-2.0-only
/* drivers/usb/gadget/f_diag.c
 * Diag Function Device - Route ARM9 and ARM11 DIAG messages
 * between HOST and DEVICE.
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
 * Author: Brian Swetland <[email protected]>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/ratelimit.h>
#include <linux/usb/usbdiag.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>

#define MAX_INST_NAME_LEN 40

/* dload specific support */
#define PID_MAGIC_ID 0x71432909
#define SERIAL_NUM_MAGIC_ID 0x61945374
#define SERIAL_NUMBER_LENGTH 128

struct dload_struct {
	u32 pid;
	char serial_number[SERIAL_NUMBER_LENGTH];
	u32 pid_magic;
	u32 serial_magic;
};

/* for configfs support */
struct diag_opts {
	struct usb_function_instance func_inst;
	char *name;
	struct dload_struct dload;
};

static inline struct diag_opts *to_diag_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct diag_opts,
			    func_inst.group);
}

static DEFINE_SPINLOCK(ch_lock);
static LIST_HEAD(usb_diag_ch_list);
static struct dload_struct __iomem *diag_dload;

static struct usb_interface_descriptor intf_desc = {
	.bLength = sizeof(intf_desc),
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 2,
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol = 0x30,
};

static struct usb_endpoint_descriptor hs_bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
	.bInterval = 0,
};

static struct usb_endpoint_descriptor fs_bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(64),
	.bInterval = 0,
};

static struct usb_endpoint_descriptor hs_bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
	.bInterval = 0,
};

static struct usb_endpoint_descriptor fs_bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(64),
	.bInterval = 0,
};

static struct usb_endpoint_descriptor ss_bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
	.bLength = sizeof(ss_bulk_in_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
};

static struct usb_endpoint_descriptor ss_bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
	.bLength = sizeof(ss_bulk_out_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
};

static struct usb_descriptor_header *fs_diag_desc[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &fs_bulk_in_desc,
	(struct usb_descriptor_header *) &fs_bulk_out_desc,
	NULL,
};

static struct usb_descriptor_header *hs_diag_desc[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &hs_bulk_in_desc,
	(struct usb_descriptor_header *) &hs_bulk_out_desc,
	NULL,
};

static struct usb_descriptor_header *ss_diag_desc[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &ss_bulk_in_desc,
	(struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
	(struct usb_descriptor_header *) &ss_bulk_out_desc,
	(struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
	NULL,
};

/**
 * struct diag_context - USB diag function driver private structure
 * @function: function structure for USB interface
 * @out: USB OUT endpoint struct
 * @in: USB IN endpoint struct
 * @read_pool: List of requests used for Rx (OUT ep)
 * @write_pool: List of requests used for Tx (IN ep)
 * @lock: Spinlock to protect the read_pool and write_pool lists
 * @cdev: USB composite device struct
 * @ch: USB diag channel
 */
struct diag_context {
	struct usb_function function;
	struct usb_ep *out;
	struct usb_ep *in;
	struct list_head read_pool;
	struct list_head write_pool;
	spinlock_t lock;
	unsigned int configured;
	struct usb_composite_dev *cdev;
	struct usb_diag_ch *ch;
	struct kref kref;
	/* pkt counters */
	unsigned long dpkts_tolaptop;
	unsigned long dpkts_tomodem;
	unsigned int dpkts_tolaptop_pending;
	/* A list node inside the diag_dev_list */
	struct list_head list_item;
};

static struct list_head diag_dev_list;

static inline struct diag_context *func_to_diag(struct usb_function *f)
{
	return container_of(f, struct diag_context, function);
}

/* Called with ctxt->lock held; i.e. only use with kref_put_lock() */
static void diag_context_release(struct kref *kref)
{
	struct diag_context *ctxt =
		container_of(kref, struct diag_context, kref);

	spin_unlock(&ctxt->lock);
	kfree(ctxt);
}

static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
{
	struct usb_composite_dev *cdev = ctxt->cdev;
	struct usb_gadget_strings **table;
	struct usb_string *s;
	struct usb_gadget_string_container *uc;
	struct dload_struct local_diag_dload = { 0 };

	/*
	 * update pid and serial number to dload only if diag
	 * interface is zeroth interface.
	 */
	if (intf_desc.bInterfaceNumber)
		return;

	if (!diag_dload) {
		pr_debug("%s: unable to update PID and serial_no\n", __func__);
		return;
	}

	/* update pid */
	local_diag_dload.pid = cdev->desc.idProduct;
	local_diag_dload.pid_magic = PID_MAGIC_ID;
	local_diag_dload.serial_magic = SERIAL_NUM_MAGIC_ID;

	list_for_each_entry(uc, &cdev->gstrings, list) {
		table = (struct usb_gadget_strings **)uc->stash;
		if (!table) {
			pr_err("%s: can't update dload cookie\n", __func__);
			break;
		}

		for (s = (*table)->strings; s && s->s; s++) {
			if (s->id == cdev->desc.iSerialNumber) {
				strscpy(local_diag_dload.serial_number, s->s,
					SERIAL_NUMBER_LENGTH);
				goto update_dload;
			}
		}
	}

update_dload:
	pr_debug("%s: dload:%pK pid:%x serial_num:%s\n",
		 __func__, diag_dload, local_diag_dload.pid,
		 local_diag_dload.serial_number);

	memcpy_toio(diag_dload, &local_diag_dload, sizeof(local_diag_dload));
}

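/*
 * Completion handler for the bulk IN (Tx) endpoint: return the request to the
 * write pool and notify the diag client with USB_DIAG_WRITE_DONE.
 */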
static void diag_write_complete(struct usb_ep *ep,
				struct usb_request *req)
{
	struct diag_context *ctxt = ep->driver_data;
	struct diag_request *d_req = req->context;
	unsigned long flags;

	ctxt->dpkts_tolaptop_pending--;

	if (!req->status)
		ctxt->dpkts_tolaptop++;

	spin_lock_irqsave(&ctxt->lock, flags);
	list_add_tail(&req->list, &ctxt->write_pool);
	if (req->length != 0) {
		d_req->actual = req->actual;
		d_req->status = req->status;
	}
	spin_unlock_irqrestore(&ctxt->lock, flags);

	if (ctxt->ch && ctxt->ch->notify)
		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_WRITE_DONE, d_req);

	kref_put_lock(&ctxt->kref, diag_context_release, &ctxt->lock);
}

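/*
 * Completion handler for the bulk OUT (Rx) endpoint: record the transfer
 * result, return the request to the read pool and notify the diag client with
 * USB_DIAG_READ_DONE.
 */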
static void diag_read_complete(struct usb_ep *ep,
			       struct usb_request *req)
{
	struct diag_context *ctxt = ep->driver_data;
	struct diag_request *d_req = req->context;
	unsigned long flags;

	d_req->actual = req->actual;
	d_req->status = req->status;

	spin_lock_irqsave(&ctxt->lock, flags);
	list_add_tail(&req->list, &ctxt->read_pool);
	spin_unlock_irqrestore(&ctxt->lock, flags);

	ctxt->dpkts_tomodem++;

	if (ctxt->ch && ctxt->ch->notify)
		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_READ_DONE, d_req);

	kref_put_lock(&ctxt->kref, diag_context_release, &ctxt->lock);
}

/**
 * usb_diag_open() - Open a diag channel over USB
 * @name: Name of the channel
 * @priv: Private structure pointer which will be passed in notify()
 * @notify: Callback function to receive notifications
 *
 * This function iterates over the available channels and returns
 * the channel handler if the name matches. The notify callback is called
 * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
 */
struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
		void (*notify)(void *, unsigned int, struct diag_request *))
{
	struct usb_diag_ch *ch;
	unsigned long flags;
	int found = 0;
	bool connected = false;
	struct diag_context *dev;

	spin_lock_irqsave(&ch_lock, flags);
	/* Check if we already have a channel with this name */
	list_for_each_entry(ch, &usb_diag_ch_list, list) {
		if (!strcmp(name, ch->name)) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ch_lock, flags);

	if (!found) {
		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch)
			return ERR_PTR(-ENOMEM);
	}

	ch->name = name;
	ch->priv = priv;
	ch->notify = notify;

	if (!found) {
		spin_lock_irqsave(&ch_lock, flags);
		list_add_tail(&ch->list, &usb_diag_ch_list);
		spin_unlock_irqrestore(&ch_lock, flags);
	}

	if (ch->priv_usb) {
		dev = ch->priv_usb;
		spin_lock_irqsave(&dev->lock, flags);
		connected = dev->configured;
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	if (ch->notify && connected)
		ch->notify(priv, USB_DIAG_CONNECT, NULL);

	return ch;
}
EXPORT_SYMBOL(usb_diag_open);

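/*
 * Example (illustrative sketch, not part of this driver): a diag client such
 * as the diag char driver would typically open the channel with a notify
 * callback along these lines; the callback and variable names below are
 * hypothetical. The callback receives USB_DIAG_CONNECT, USB_DIAG_DISCONNECT,
 * USB_DIAG_READ_DONE and USB_DIAG_WRITE_DONE events.
 *
 *	static void my_notify(void *priv, unsigned int event,
 *			      struct diag_request *d_req);
 *
 *	ch = usb_diag_open("diag", my_priv, my_notify);
 *	if (IS_ERR(ch))
 *		return PTR_ERR(ch);
 */
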
/**
 * usb_diag_close() - Close a diag channel over USB
 * @ch: Channel handler
 *
 * This function closes the diag channel.
 */
void usb_diag_close(struct usb_diag_ch *ch)
{
	struct diag_context *dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ch_lock, flags);
	ch->priv = NULL;
	ch->notify = NULL;
	/* Free up the resources since the channel is no longer active */
	list_del(&ch->list);
	list_for_each_entry(dev, &diag_dev_list, list_item)
		if (dev->ch == ch)
			dev->ch = NULL;
	kfree(ch);
	spin_unlock_irqrestore(&ch_lock, flags);
}
EXPORT_SYMBOL(usb_diag_close);

static void free_reqs(struct diag_context *ctxt)
{
	struct list_head *act, *tmp;
	struct usb_request *req;

	list_for_each_safe(act, tmp, &ctxt->write_pool) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ctxt->in, req);
	}

	list_for_each_safe(act, tmp, &ctxt->read_pool) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ctxt->out, req);
	}
}

/**
 * usb_diag_alloc_req() - Allocate USB requests
 * @ch: Channel handler
 * @n_write: Number of requests for Tx
 * @n_read: Number of requests for Rx
 *
 * This function allocates read and write USB requests for the interface
 * associated with this channel. The actual buffers are not allocated here;
 * they are passed in by the diag char driver.
 */
int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
{
	struct diag_context *ctxt = ch->priv_usb;
	struct usb_request *req;
	int i;
	unsigned long flags;

	if (!ctxt)
		return -ENODEV;

	spin_lock_irqsave(&ctxt->lock, flags);
	/* Free previous session's stale requests */
	free_reqs(ctxt);
	for (i = 0; i < n_write; i++) {
		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
		if (!req)
			goto fail;
		kmemleak_not_leak(req);
		req->complete = diag_write_complete;
		req->zero = true;
		list_add_tail(&req->list, &ctxt->write_pool);
	}
	for (i = 0; i < n_read; i++) {
		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
		if (!req)
			goto fail;
		kmemleak_not_leak(req);
		req->complete = diag_read_complete;
		list_add_tail(&req->list, &ctxt->read_pool);
	}
	spin_unlock_irqrestore(&ctxt->lock, flags);
	return 0;
fail:
	free_reqs(ctxt);
	spin_unlock_irqrestore(&ctxt->lock, flags);
	return -ENOMEM;
}
EXPORT_SYMBOL(usb_diag_alloc_req);

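/*
 * Example (illustrative, assumes client-chosen pool depths): after receiving
 * USB_DIAG_CONNECT a client would typically size and allocate its request
 * pools, e.g.
 *
 *	err = usb_diag_alloc_req(ch, N_TX_REQS, N_RX_REQS);
 *	if (err)
 *		return err;
 *
 * where N_TX_REQS and N_RX_REQS are hypothetical client-side constants.
 */
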
#define DWC3_MAX_REQUEST_SIZE (16 * 1024 * 1024)

/**
 * usb_diag_request_size - Max request size for controller
 * @ch: Channel handler
 *
 * Inform the max request size so that the diag driver can split packets
 * into chunks of the max size which the controller can handle.
 */
int usb_diag_request_size(struct usb_diag_ch *ch)
{
	return DWC3_MAX_REQUEST_SIZE;
}
EXPORT_SYMBOL(usb_diag_request_size);

/**
 * usb_diag_read() - Read data from USB diag channel
 * @ch: Channel handler
 * @d_req: Diag request struct
 *
 * Enqueue a request on the OUT endpoint of the interface corresponding to
 * this channel. This function returns a proper error code when the interface
 * is not in the configured state, no Rx requests are available, or the
 * endpoint queue fails.
 *
 * This function operates asynchronously. The READ_DONE event is notified
 * after completion of the OUT request.
 */
int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
{
	struct diag_context *ctxt = ch->priv_usb;
	unsigned long flags;
	struct usb_request *req;
	struct usb_ep *out;
	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);

	if (!ctxt)
		return -ENODEV;

	spin_lock_irqsave(&ctxt->lock, flags);

	if (!ctxt->configured || !ctxt->out) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	out = ctxt->out;

	if (list_empty(&ctxt->read_pool)) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
		return -EAGAIN;
	}

	req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
	list_del(&req->list);
	kref_get(&ctxt->kref); /* put called in complete callback */
	spin_unlock_irqrestore(&ctxt->lock, flags);

	req->buf = d_req->buf;
	req->length = d_req->length;
	req->context = d_req;

	/* make sure context is still valid after releasing lock */
	if (ctxt != ch->priv_usb) {
		usb_ep_free_request(out, req);
		kref_put_lock(&ctxt->kref, diag_context_release, &ctxt->lock);
		return -EIO;
	}

	if (usb_ep_queue(out, req, GFP_ATOMIC)) {
		/* On error, add the request back to the read pool */
		spin_lock_irqsave(&ctxt->lock, flags);
		list_add_tail(&req->list, &ctxt->read_pool);
		/* 1 error message for every 10 sec */
		if (__ratelimit(&rl))
			ERROR(ctxt->cdev, "%s: cannot queue read request\n",
				__func__);

		if (kref_put(&ctxt->kref, diag_context_release))
			/* diag_context_release called spin_unlock already */
			local_irq_restore(flags);
		else
			spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL(usb_diag_read);

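/*
 * Example (illustrative, client-side sketch): the caller owns the diag_request
 * and its buffer. d_req->buf and d_req->length are filled in before queuing;
 * the result is reported via d_req->actual and d_req->status together with the
 * USB_DIAG_READ_DONE notification. rx_buf and rx_buf_len are hypothetical.
 *
 *	d_req->buf = rx_buf;
 *	d_req->length = rx_buf_len;
 *	err = usb_diag_read(ch, d_req);
 *	if (err)
 *		return err;
 */
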
/**
 * usb_diag_write() - Write data over the USB diag channel
 * @ch: Channel handler
 * @d_req: Diag request struct
 *
 * Enqueue a request on the IN endpoint of the interface corresponding to
 * this channel. This function returns a proper error code when the interface
 * is not in the configured state, no Tx requests are available, or the
 * endpoint queue fails.
 *
 * This function operates asynchronously. The WRITE_DONE event is notified
 * after completion of the IN request.
 */
int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
{
	struct diag_context *ctxt = ch->priv_usb;
	unsigned long flags;
	struct usb_request *req = NULL;
	struct usb_ep *in;
	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);

	if (!ctxt)
		return -ENODEV;

	spin_lock_irqsave(&ctxt->lock, flags);

	if (!ctxt->configured || !ctxt->in) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	in = ctxt->in;

	if (list_empty(&ctxt->write_pool)) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
		return -EAGAIN;
	}

	req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
	list_del(&req->list);
	kref_get(&ctxt->kref); /* put called in complete callback */
	spin_unlock_irqrestore(&ctxt->lock, flags);

	req->buf = d_req->buf;
	req->length = d_req->length;
	req->context = d_req;

	/* make sure context is still valid after releasing lock */
	if (ctxt != ch->priv_usb) {
		usb_ep_free_request(in, req);
		kref_put_lock(&ctxt->kref, diag_context_release, &ctxt->lock);
		return -EIO;
	}

	ctxt->dpkts_tolaptop_pending++;

	if (usb_ep_queue(in, req, GFP_ATOMIC)) {
		/* On error, add the request back to the write pool */
		spin_lock_irqsave(&ctxt->lock, flags);
		list_add_tail(&req->list, &ctxt->write_pool);
		ctxt->dpkts_tolaptop_pending--;
		/* 1 error message for every 10 sec */
		if (__ratelimit(&rl))
			ERROR(ctxt->cdev, "%s: cannot queue write request\n",
				__func__);

		if (kref_put(&ctxt->kref, diag_context_release))
			/* diag_context_release called spin_unlock already */
			local_irq_restore(flags);
		else
			spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	/*
	 * It's possible that both write completion AND unbind could have been
	 * completed asynchronously by this point. Since they both release the
	 * kref, ctxt is _NOT_ guaranteed to be valid here.
	 */
	return 0;
}
EXPORT_SYMBOL(usb_diag_write);

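/*
 * Example (illustrative, client-side sketch): writes follow the same pattern;
 * USB_DIAG_WRITE_DONE hands the same d_req back once the IN transfer
 * completes. tx_buf and tx_len are hypothetical.
 *
 *	d_req->buf = tx_buf;
 *	d_req->length = tx_len;
 *	err = usb_diag_write(ch, d_req);
 *	if (err)
 *		return err;
 */
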
static void diag_function_disable(struct usb_function *f)
{
	struct diag_context *dev = func_to_diag(f);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->configured = 0;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (dev->ch && dev->ch->notify)
		dev->ch->notify(dev->ch->priv, USB_DIAG_DISCONNECT, NULL);

	usb_ep_disable(dev->in);
	dev->in->driver_data = NULL;

	usb_ep_disable(dev->out);
	dev->out->driver_data = NULL;

	if (dev->ch)
		dev->ch->priv_usb = NULL;
}

static void diag_free_func(struct usb_function *f)
{
	struct diag_context *ctxt = func_to_diag(f);
	unsigned long flags;

	spin_lock_irqsave(&ctxt->lock, flags);
	list_del(&ctxt->list_item);
	if (kref_put(&ctxt->kref, diag_context_release))
		/* diag_context_release called spin_unlock already */
		local_irq_restore(flags);
	else
		spin_unlock_irqrestore(&ctxt->lock, flags);
}

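/*
 * Called by the composite core on SET_CONFIGURATION/SET_INTERFACE: select the
 * speed-specific endpoint descriptors, enable both bulk endpoints, mark the
 * function configured and signal USB_DIAG_CONNECT to the diag client.
 */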
static int diag_function_set_alt(struct usb_function *f,
				 unsigned int intf, unsigned int alt)
{
	struct diag_context *dev = func_to_diag(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	unsigned long flags;
	int rc = 0;

	if (config_ep_by_speed(cdev->gadget, f, dev->in) ||
	    config_ep_by_speed(cdev->gadget, f, dev->out)) {
		dev->in->desc = NULL;
		dev->out->desc = NULL;
		return -EINVAL;
	}

	if (!dev->ch)
		return -ENODEV;

	/*
	 * Indicate to the diag channel that the active diag device is dev,
	 * since several diag devices can point to the same channel.
	 */
	dev->ch->priv_usb = dev;

	dev->in->driver_data = dev;
	rc = usb_ep_enable(dev->in);
	if (rc) {
		ERROR(dev->cdev, "can't enable %s, result %d\n",
			dev->in->name, rc);
		return rc;
	}

	dev->out->driver_data = dev;
	rc = usb_ep_enable(dev->out);
	if (rc) {
		ERROR(dev->cdev, "can't enable %s, result %d\n",
			dev->out->name, rc);
		usb_ep_disable(dev->in);
		return rc;
	}

	dev->dpkts_tolaptop = 0;
	dev->dpkts_tomodem = 0;
	dev->dpkts_tolaptop_pending = 0;

	spin_lock_irqsave(&dev->lock, flags);
	dev->configured = 1;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (dev->ch->notify)
		dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);

	return rc;
}

static void diag_function_unbind(struct usb_configuration *c,
				 struct usb_function *f)
{
	struct diag_context *ctxt = func_to_diag(f);
	unsigned long flags;

	usb_free_all_descriptors(f);

	/*
	 * The channel's priv_usb may point to another diag function.
	 * Clear priv_usb only if the channel is used by the diag dev
	 * we unbind here.
	 */
	if (ctxt->ch && ctxt->ch->priv_usb == ctxt)
		ctxt->ch->priv_usb = NULL;

	spin_lock_irqsave(&ctxt->lock, flags);
	/* Free any pending USB requests from last session */
	free_reqs(ctxt);
	spin_unlock_irqrestore(&ctxt->lock, flags);
}

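/*
 * Bind: claim an interface number, auto-configure the bulk IN/OUT endpoints,
 * mirror the chosen endpoint addresses into the HS/SS descriptors and
 * register the descriptor sets with the composite core.
 */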
static int diag_function_bind(struct usb_configuration *c,
			      struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct diag_context *ctxt = func_to_diag(f);
	struct usb_ep *ep;
	int status = -ENODEV;

	ctxt->cdev = c->cdev;

	intf_desc.bInterfaceNumber = usb_interface_id(c, f);

	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
	if (!ep)
		goto fail;
	ctxt->in = ep;
	ep->driver_data = ctxt;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
	if (!ep)
		goto fail;
	ctxt->out = ep;
	ep->driver_data = ctxt;

	hs_bulk_in_desc.bEndpointAddress = fs_bulk_in_desc.bEndpointAddress;
	hs_bulk_out_desc.bEndpointAddress = fs_bulk_out_desc.bEndpointAddress;
	ss_bulk_in_desc.bEndpointAddress = fs_bulk_in_desc.bEndpointAddress;
	ss_bulk_out_desc.bEndpointAddress = fs_bulk_out_desc.bEndpointAddress;

	status = usb_assign_descriptors(f, fs_diag_desc, hs_diag_desc,
					ss_diag_desc, ss_diag_desc);
	if (status)
		goto fail;

	/* Allow only first diag channel to update pid and serial no */
	if (ctxt == list_first_entry(&diag_dev_list,
				     struct diag_context, list_item))
		diag_update_pid_and_serial_num(ctxt);

	return 0;
fail:
	if (ctxt->out)
		ctxt->out->driver_data = NULL;
	if (ctxt->in)
		ctxt->in->driver_data = NULL;
	return status;
}

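/*
 * Allocate a per-function diag context and attach it to the named channel,
 * creating the channel entry if the diag char driver has not opened it yet.
 */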
static struct diag_context *diag_context_init(const char *name)
{
	struct diag_context *dev;
	struct usb_diag_ch *_ch;
	int found = 0;
	unsigned long flags;

	pr_debug("%s called for channel:%s\n", __func__, name);

	list_for_each_entry(_ch, &usb_diag_ch_list, list) {
		if (!strcmp(name, _ch->name)) {
			found = 1;
			break;
		}
	}

	if (!found) {
		pr_warn("%s: unable to get diag usb channel\n", __func__);

		_ch = kzalloc(sizeof(*_ch), GFP_KERNEL);
		if (_ch == NULL)
			return ERR_PTR(-ENOMEM);

		_ch->name = name;

		spin_lock_irqsave(&ch_lock, flags);
		list_add_tail(&_ch->list, &usb_diag_ch_list);
		spin_unlock_irqrestore(&ch_lock, flags);
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&dev->list_item, &diag_dev_list);

	/*
	 * Several diag devices can point to the same channel when they belong
	 * to different configurations; however, only the active diag device
	 * claims the channel by setting ch->priv_usb (see
	 * diag_function_set_alt).
	 */
	dev->ch = _ch;

	dev->function.name = _ch->name;
	dev->function.bind = diag_function_bind;
	dev->function.unbind = diag_function_unbind;
	dev->function.set_alt = diag_function_set_alt;
	dev->function.disable = diag_function_disable;
	dev->function.free_func = diag_free_func;
	kref_init(&dev->kref);
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->read_pool);
	INIT_LIST_HEAD(&dev->write_pool);

	return dev;
}

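/*
 * debugfs interface: "usb_diag/status" dumps per-channel endpoint names and
 * packet counters on read, and resets the counters on write.
 */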
#if defined(CONFIG_DEBUG_FS)
static char debug_buffer[PAGE_SIZE];

static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	char *buf = debug_buffer;
	int temp = 0;
	struct usb_diag_ch *ch;

	list_for_each_entry(ch, &usb_diag_ch_list, list) {
		struct diag_context *ctxt = ch->priv_usb;
		unsigned long flags;

		if (ctxt) {
			spin_lock_irqsave(&ctxt->lock, flags);
			temp += scnprintf(buf + temp, PAGE_SIZE - temp,
					  "---Name: %s---\n"
					  "endpoints: %s, %s\n"
					  "dpkts_tolaptop: %lu\n"
					  "dpkts_tomodem: %lu\n"
					  "pkts_tolaptop_pending: %u\n",
					  ch->name,
					  ctxt->in->name, ctxt->out->name,
					  ctxt->dpkts_tolaptop,
					  ctxt->dpkts_tomodem,
					  ctxt->dpkts_tolaptop_pending);
			spin_unlock_irqrestore(&ctxt->lock, flags);
		}
	}

	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
}

static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct usb_diag_ch *ch;

	list_for_each_entry(ch, &usb_diag_ch_list, list) {
		struct diag_context *ctxt = ch->priv_usb;
		unsigned long flags;

		if (ctxt) {
			spin_lock_irqsave(&ctxt->lock, flags);
			ctxt->dpkts_tolaptop = 0;
			ctxt->dpkts_tomodem = 0;
			ctxt->dpkts_tolaptop_pending = 0;
			spin_unlock_irqrestore(&ctxt->lock, flags);
		}
	}

	return count;
}

static int debug_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations debug_fdiag_ops = {
	.open = debug_open,
	.read = debug_read_stats,
	.write = debug_reset_stats,
};

struct dentry *dent_diag;

static void fdiag_debugfs_init(void)
{
	struct dentry *dent_diag_status;

	dent_diag = debugfs_create_dir("usb_diag", 0);
	if (!dent_diag || IS_ERR(dent_diag))
		return;

	dent_diag_status = debugfs_create_file("status", 0444, dent_diag, 0,
					       &debug_fdiag_ops);
	if (!dent_diag_status || IS_ERR(dent_diag_status)) {
		debugfs_remove(dent_diag);
		dent_diag = NULL;
		return;
	}
}

static void fdiag_debugfs_remove(void)
{
	debugfs_remove_recursive(dent_diag);
}
#else
static inline void fdiag_debugfs_init(void) {}
static inline void fdiag_debugfs_remove(void) {}
#endif

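/*
 * configfs attributes: "pid" and "serial" let user space read and update the
 * PID and serial number cookies in the shared dload imem region.
 */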
static void diag_opts_release(struct config_item *item)
{
	struct diag_opts *opts = to_diag_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations diag_item_ops = {
	.release = diag_opts_release,
};

static ssize_t diag_pid_show(struct config_item *item, char *page)
{
	struct dload_struct local_dload_struct;

	if (!diag_dload) {
		pr_warn("%s: diag_dload mem region not defined\n", __func__);
		return -EINVAL;
	}

	memcpy_fromio(&local_dload_struct.pid, &diag_dload->pid,
		      sizeof(local_dload_struct.pid));

	return scnprintf(page, PAGE_SIZE, "%x\n", local_dload_struct.pid);
}

static ssize_t diag_pid_store(struct config_item *item, const char *page,
			      size_t len)
{
	int ret;
	u32 pid;

	if (!diag_dload) {
		pr_warn("%s: diag_dload mem region not defined\n", __func__);
		return 0;
	}

	ret = kstrtou32(page, 0, &pid);
	if (ret)
		return ret;

	memcpy_toio(&diag_dload->pid, &pid, sizeof(pid));
	pid = PID_MAGIC_ID;
	memcpy_toio(&diag_dload->pid_magic, &pid, sizeof(pid));

	return len;
}

CONFIGFS_ATTR(diag_, pid);

static ssize_t diag_serial_show(struct config_item *item, char *page)
{
	struct dload_struct local_dload_struct;

	if (!diag_dload) {
		pr_warn("%s: diag_dload mem region not defined\n", __func__);
		return -EINVAL;
	}

	memcpy_fromio(&local_dload_struct.serial_number,
		      &diag_dload->serial_number,
		      SERIAL_NUMBER_LENGTH);

	return scnprintf(page, PAGE_SIZE, "%s\n",
			 local_dload_struct.serial_number);
}

static ssize_t diag_serial_store(struct config_item *item, const char *page,
				 size_t len)
{
	u32 magic;
	char *p;
	char serial_number[SERIAL_NUMBER_LENGTH] = {0};

	if (!diag_dload) {
		pr_warn("%s: diag_dload mem region not defined\n", __func__);
		return 0;
	}

	strscpy(serial_number, page, SERIAL_NUMBER_LENGTH);
	p = strnchr(serial_number, SERIAL_NUMBER_LENGTH, '\n');
	if (p)
		*p = '\0';

	memcpy_toio(&diag_dload->serial_number, serial_number,
		    SERIAL_NUMBER_LENGTH);
	magic = SERIAL_NUM_MAGIC_ID;
	memcpy_toio(&diag_dload->serial_magic, &magic, sizeof(magic));

	return len;
}

CONFIGFS_ATTR(diag_, serial);

static struct configfs_attribute *diag_attrs[] = {
	&diag_attr_pid,
	&diag_attr_serial,
	NULL,
};

static struct config_item_type diag_func_type = {
	.ct_item_ops = &diag_item_ops,
	.ct_attrs = diag_attrs,
	.ct_owner = THIS_MODULE,
};

static int diag_set_inst_name(struct usb_function_instance *fi,
			      const char *name)
{
	struct diag_opts *opts = container_of(fi, struct diag_opts, func_inst);
	char *ptr;
	int name_len;

	name_len = strlen(name) + 1;
	if (name_len > MAX_INST_NAME_LEN)
		return -ENAMETOOLONG;

	ptr = kstrndup(name, name_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	opts->name = ptr;

	return 0;
}

static void diag_free_inst(struct usb_function_instance *f)
{
	struct diag_opts *opts;

	opts = container_of(f, struct diag_opts, func_inst);
	kfree(opts->name);
	kfree(opts);
}

static struct usb_function_instance *diag_alloc_inst(void)
{
	struct diag_opts *opts;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->func_inst.set_inst_name = diag_set_inst_name;
	opts->func_inst.free_func_inst = diag_free_inst;
	config_group_init_type_name(&opts->func_inst.group, "",
				    &diag_func_type);

	return &opts->func_inst;
}

static struct usb_function *diag_alloc(struct usb_function_instance *fi)
{
	struct diag_opts *opts;
	struct diag_context *dev;

	opts = container_of(fi, struct diag_opts, func_inst);

	dev = diag_context_init(opts->name);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	return &dev->function;
}

DECLARE_USB_FUNCTION(diag, diag_alloc_inst, diag_alloc);

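/*
 * Module init: register the "diag" USB function, create the debugfs entries
 * and map the dload imem region (if described in DT) used for the PID/serial
 * cookies.
 */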
static int __init diag_init(void)
{
	struct device_node *np;
	int ret;

	INIT_LIST_HEAD(&diag_dev_list);

	fdiag_debugfs_init();

	ret = usb_function_register(&diagusb_func);
	if (ret) {
		pr_err("%s: failed to register diag %d\n", __func__, ret);
		return ret;
	}

	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-diag-dload");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "qcom,android-usb");

	if (!np)
		pr_warn("diag: failed to find diag_dload imem node\n");

	diag_dload = np ? of_iomap(np, 0) : NULL;

	return ret;
}

static void __exit diag_exit(void)
{
	struct list_head *act, *tmp;
	struct usb_diag_ch *_ch;
	unsigned long flags;

	if (diag_dload)
		iounmap(diag_dload);

	usb_function_unregister(&diagusb_func);
	fdiag_debugfs_remove();

	list_for_each_safe(act, tmp, &usb_diag_ch_list) {
		_ch = list_entry(act, struct usb_diag_ch, list);

		spin_lock_irqsave(&ch_lock, flags);
		/* Free if diagchar is not using the channel anymore */
		if (!_ch->priv) {
			list_del(&_ch->list);
			kfree(_ch);
		}
		spin_unlock_irqrestore(&ch_lock, flags);
	}
}

module_init(diag_init);
module_exit(diag_exit);

MODULE_DESCRIPTION("Diag function driver");
MODULE_LICENSE("GPL");