  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * f_ccid.c -- CCID function Driver
  4. *
  5. * Copyright (c) 2011, 2013, 2017, 2019 The Linux Foundation. All rights reserved.
  6. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  7. */
  8. #include <linux/slab.h>
  9. #include <linux/kernel.h>
  10. #include <linux/device.h>
  11. #include <linux/fs.h>
  12. #include <linux/module.h>
  13. #include <linux/usb/ccid_desc.h>
  14. #include <linux/usb/composite.h>
  15. #include <linux/cdev.h>
  16. #include <linux/uaccess.h>
  17. #include "f_ccid.h"
  18. #define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
  19. #define BULK_OUT_BUFFER_SIZE 1024
  20. #define CTRL_BUF_SIZE 4
  21. #define FUNCTION_NAME "ccid"
  22. #define MAX_INST_NAME_LEN 40
  23. #define CCID_CTRL_DEV_NAME "ccid_ctrl"
  24. #define CCID_BULK_DEV_NAME "ccid_bulk"
  25. #define CCID_NOTIFY_INTERVAL 5
  26. #define CCID_NOTIFY_MAXPACKET 4
  27. /* number of tx requests to allocate */
  28. #define TX_REQ_MAX 4
/*
 * Per-function state for the CCID control character device
 * (CCID_CTRL_DEV_NAME). Carries class-request data from the USB setup
 * path to the userspace reader.
 */
struct ccid_ctrl_dev {
	atomic_t opened;		/* 1 while the ctrl node is held open */
	struct list_head tx_q;
	wait_queue_head_t tx_wait_q;	/* readers sleep here for ctrl data */
	unsigned char buf[CTRL_BUF_SIZE]; /* latched class-request payload */
	int tx_ctrl_done;		/* set when buf holds a fresh request */
	struct cdev cdev;
};
/*
 * Per-function state for the CCID bulk character device
 * (CCID_BULK_DEV_NAME), which proxies the bulk IN/OUT endpoints
 * to userspace read()/write().
 */
struct ccid_bulk_dev {
	atomic_t error;			/* latched I/O error; cleared on open */
	atomic_t opened;		/* 1 while the bulk node is held open */
	atomic_t rx_req_busy;		/* rx_req buffer being copied to user */
	wait_queue_head_t read_wq;	/* readers wait for rx completion */
	wait_queue_head_t write_wq;	/* writers wait for an idle tx req */
	struct usb_request *rx_req;	/* single reusable OUT request */
	int rx_done;			/* set by the OUT completion handler */
	struct list_head tx_idle;	/* pool of idle IN requests */
	struct cdev cdev;
};
/* configfs function-instance wrapper holding the backing f_ccid. */
struct ccid_opts {
	struct usb_function_instance func_inst;
	struct f_ccid *ccid;
};
/* Main CCID function state: endpoints plus the two char-dev frontends. */
struct f_ccid {
	struct usb_function function;
	int ifc_id;			/* interface number from composite core */
	spinlock_t lock;		/* guards request lists / rx state */
	atomic_t online;		/* set in set_alt, cleared in disable */
	/* usb eps */
	struct usb_ep *notify;		/* interrupt IN endpoint */
	struct usb_ep *in;		/* bulk IN endpoint */
	struct usb_ep *out;		/* bulk OUT endpoint */
	struct usb_request *notify_req;
	struct ccid_ctrl_dev ctrl_dev;
	struct ccid_bulk_dev bulk_dev;
	int dtr_state;			/* 1 while the interface is active */
};
#define MAX_INSTANCES 4
static int major;			/* char-dev major shared by instances */
static struct class *ccid_class;
static DEFINE_IDA(ccid_ida);		/* instance-number allocator */
static DEFINE_MUTEX(ccid_ida_lock);	/* serializes ccid_ida access */
  71. static inline struct f_ccid *ctrl_dev_to_ccid(struct ccid_ctrl_dev *d)
  72. {
  73. return container_of(d, struct f_ccid, ctrl_dev);
  74. }
  75. static inline struct f_ccid *bulk_dev_to_ccid(struct ccid_bulk_dev *d)
  76. {
  77. return container_of(d, struct f_ccid, bulk_dev);
  78. }
/* Interface Descriptor: one smart-card interface with notify + bulk pair. */
static struct usb_interface_descriptor ccid_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_CSCID,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
};
/* CCID Class Descriptor (CCID spec rev 1.1 functional descriptor). */
static struct usb_ccid_class_descriptor ccid_class_desc = {
	.bLength =		sizeof(ccid_class_desc),
	.bDescriptorType =	CCID_DECRIPTOR_TYPE,
	.bcdCCID =		CCID1_10,
	.bMaxSlotIndex =	0,	/* single slot */
	/* This value indicates what voltages the CCID can supply to slots */
	.bVoltageSupport =	VOLTS_3_0,
	.dwProtocols =		PROTOCOL_TO,
	/* Default ICC clock frequency in KHz */
	.dwDefaultClock =	3580,
	/* Maximum supported ICC clock frequency in KHz */
	.dwMaximumClock =	3580,
	.bNumClockSupported =	0,
	/* Default ICC I/O data rate in bps */
	.dwDataRate =		9600,
	/* Maximum supported ICC I/O data rate in bps */
	.dwMaxDataRate =	9600,
	.bNumDataRatesSupported = 0,
	.dwMaxIFSD =		0,
	.dwSynchProtocols =	0,
	.dwMechanical =		0,
	/* This value indicates what intelligent features the CCID has */
	.dwFeatures =		CCID_FEATURES_EXC_SAPDU |
				CCID_FEATURES_AUTO_PNEGO |
				CCID_FEATURES_AUTO_BAUD |
				CCID_FEATURES_AUTO_CLOCK |
				CCID_FEATURES_AUTO_VOLT |
				CCID_FEATURES_AUTO_ACTIV |
				CCID_FEATURES_AUTO_PCONF,
	/* extended APDU level Message Length */
	.dwMaxCCIDMessageLength = 0x200,
	.bClassGetResponse =	0x0,
	.bClassEnvelope =	0x0,
	.wLcdLayout =		0,
	.bPINSupport =		0,
	.bMaxCCIDBusySlots =	1
};
/* Full speed support: 64-byte bulk packets, interrupt notify endpoint. */
static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	/* FS interval is in frames (ms) */
	.bInterval =		1 << CCID_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor ccid_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(64),
};

static struct usb_endpoint_descriptor ccid_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(64),
};

static struct usb_descriptor_header *ccid_fs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_fs_notify_desc,
	(struct usb_descriptor_header *) &ccid_fs_in_desc,
	(struct usb_descriptor_header *) &ccid_fs_out_desc,
	NULL,
};
/* High speed support: 512-byte bulk packets. */
static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	/* HS interval is 2^(bInterval-1) microframes */
	.bInterval =		CCID_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor ccid_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor ccid_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *ccid_hs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_hs_notify_desc,
	(struct usb_descriptor_header *) &ccid_hs_in_desc,
	(struct usb_descriptor_header *) &ccid_hs_out_desc,
	NULL,
};
/* Super speed support: 1024-byte bulk packets plus companion descriptors. */
static struct usb_endpoint_descriptor ccid_ss_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	.bInterval =		CCID_NOTIFY_INTERVAL + 4,
};

static struct usb_ss_ep_comp_descriptor ccid_ss_notify_comp_desc = {
	.bLength =		sizeof(ccid_ss_notify_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_endpoint_descriptor ccid_ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ccid_ss_in_comp_desc = {
	.bLength =		sizeof(ccid_ss_in_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_endpoint_descriptor ccid_ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ccid_ss_out_comp_desc = {
	.bLength =		sizeof(ccid_ss_out_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *ccid_ss_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_ss_notify_desc,
	(struct usb_descriptor_header *) &ccid_ss_notify_comp_desc,
	(struct usb_descriptor_header *) &ccid_ss_in_desc,
	(struct usb_descriptor_header *) &ccid_ss_in_comp_desc,
	(struct usb_descriptor_header *) &ccid_ss_out_desc,
	(struct usb_descriptor_header *) &ccid_ss_out_comp_desc,
	NULL,
};
  243. static inline struct f_ccid *func_to_ccid(struct usb_function *f)
  244. {
  245. return container_of(f, struct f_ccid, function);
  246. }
  247. static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
  248. struct usb_request *req)
  249. {
  250. unsigned long flags;
  251. spin_lock_irqsave(&ccid_dev->lock, flags);
  252. list_add_tail(&req->list, head);
  253. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  254. }
  255. static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
  256. struct list_head *head)
  257. {
  258. unsigned long flags;
  259. struct usb_request *req = NULL;
  260. spin_lock_irqsave(&ccid_dev->lock, flags);
  261. if (!list_empty(head)) {
  262. req = list_first_entry(head, struct usb_request, list);
  263. list_del(&req->list);
  264. }
  265. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  266. return req;
  267. }
  268. static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
  269. {
  270. switch (req->status) {
  271. case -ECONNRESET:
  272. case -ESHUTDOWN:
  273. case 0:
  274. break;
  275. default:
  276. pr_err("CCID notify ep error %d\n", req->status);
  277. }
  278. }
  279. static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
  280. {
  281. struct f_ccid *ccid_dev = req->context;
  282. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  283. if (req->status != 0)
  284. atomic_set(&bulk_dev->error, 1);
  285. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  286. wake_up(&bulk_dev->write_wq);
  287. }
  288. static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
  289. {
  290. struct f_ccid *ccid_dev = req->context;
  291. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  292. if (req->status != 0)
  293. atomic_set(&bulk_dev->error, 1);
  294. bulk_dev->rx_done = 1;
  295. wake_up(&bulk_dev->read_wq);
  296. }
  297. static struct usb_request *
  298. ccid_request_alloc(struct usb_ep *ep, size_t len, gfp_t kmalloc_flags)
  299. {
  300. struct usb_request *req;
  301. req = usb_ep_alloc_request(ep, kmalloc_flags);
  302. if (req != NULL) {
  303. req->length = len;
  304. req->buf = kmalloc(len, kmalloc_flags);
  305. if (req->buf == NULL) {
  306. usb_ep_free_request(ep, req);
  307. req = NULL;
  308. }
  309. }
  310. return req ? req : ERR_PTR(-ENOMEM);
  311. }
  312. static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
  313. {
  314. if (req) {
  315. kfree(req->buf);
  316. usb_ep_free_request(ep, req);
  317. }
  318. }
/*
 * Handle CCID class-specific control requests on ep0.
 *
 * ABORT (host->device) is latched into ctrl_dev->buf and handed to the
 * userspace ctrl reader via tx_wait_q. GET_CLOCK_FREQUENCIES and
 * GET_DATA_RATES (device->host) are answered directly from the class
 * descriptor. On success, returns the byte count queued on ep0 (or the
 * usb_ep_queue() error); returns a negative errno otherwise.
 */
static int
ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (!atomic_read(&ccid_dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_ABORT:
		if (w_length != 0)
			goto invalid;
		/* pack the abort (slot/seq travel in wValue) for userspace */
		ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
		ctrl_dev->buf[1] = w_value & 0xFF;
		ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
		ctrl_dev->buf[3] = 0x00;
		ctrl_dev->tx_ctrl_done = 1;
		wake_up(&ctrl_dev->tx_wait_q);
		ret = 0;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
		*(u32 *) req->buf =
			cpu_to_le32(ccid_class_desc.dwDefaultClock);
		/* never send more than the host asked for */
		ret = min_t(u32, w_length,
				sizeof(ccid_class_desc.dwDefaultClock));
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_DATA_RATES:
		*(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
		ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
		break;

	default:
invalid:
		pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			pr_err("ccid ep0 enqueue err %d\n", ret);
	}

	return ret;
}
/*
 * Tear down the function on disconnect/alt-setting change: disable all
 * endpoints, mark the device offline, then wake every sleeper so blocked
 * read/write/ctrl calls can observe !online and bail out.
 */
static void ccid_function_disable(struct usb_function *f)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;

	/* Disable endpoints */
	usb_ep_disable(ccid_dev->notify);
	usb_ep_disable(ccid_dev->in);
	usb_ep_disable(ccid_dev->out);

	ccid_dev->dtr_state = 0;
	atomic_set(&ccid_dev->online, 0);

	/* Wake up threads */
	wake_up(&bulk_dev->write_wq);
	wake_up(&bulk_dev->read_wq);
	wake_up(&ctrl_dev->tx_wait_q);
}
/*
 * Activate the interface: pick speed-appropriate descriptors and enable
 * notify, IN, and OUT endpoints in order, unwinding already-enabled
 * endpoints on any failure. Marks the function online on success.
 */
static int
ccid_function_set_alt(struct usb_function *f, unsigned int intf,
		unsigned int alt)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	int ret = 0;

	/* choose the descriptors and enable endpoints */
	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
	if (ret) {
		ccid_dev->notify->desc = NULL;
		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
				__func__, ccid_dev->notify->name, ret);
		return ret;
	}
	ret = usb_ep_enable(ccid_dev->notify);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->notify->name, ret);
		return ret;
	}
	ccid_dev->notify->driver_data = ccid_dev;

	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->in);
	if (ret) {
		ccid_dev->in->desc = NULL;
		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
				__func__, ccid_dev->in->name, ret);
		goto disable_ep_notify;
	}
	ret = usb_ep_enable(ccid_dev->in);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->in->name, ret);
		goto disable_ep_notify;
	}

	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->out);
	if (ret) {
		ccid_dev->out->desc = NULL;
		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
				__func__, ccid_dev->out->name, ret);
		goto disable_ep_in;
	}
	ret = usb_ep_enable(ccid_dev->out);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->out->name, ret);
		goto disable_ep_in;
	}

	/* all three endpoints live: open for userspace traffic */
	ccid_dev->dtr_state = 1;
	atomic_set(&ccid_dev->online, 1);
	return ret;

disable_ep_in:
	usb_ep_disable(ccid_dev->in);
disable_ep_notify:
	usb_ep_disable(ccid_dev->notify);
	ccid_dev->notify->driver_data = NULL;
	return ret;
}
/*
 * Release everything ccid_function_bind() allocated: the notify request,
 * the single OUT request, the idle IN request pool, and the descriptor
 * copies.
 *
 * NOTE(review): if rx_req_busy is set, rx_req is deliberately NOT freed
 * here — the reader in ccid_bulk_read() frees it when it observes the
 * device offline. Presumably that path always runs; verify there is no
 * leak when the reader is killed before reaching that check.
 */
static void ccid_function_unbind(struct usb_configuration *c,
		struct usb_function *f)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct usb_request *req;

	/* Free endpoint related requests */
	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
	if (!atomic_read(&bulk_dev->rx_req_busy))
		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
		ccid_request_free(req, ccid_dev->in);

	usb_free_all_descriptors(f);
}
  463. static int ccid_function_bind(struct usb_configuration *c,
  464. struct usb_function *f)
  465. {
  466. struct f_ccid *ccid_dev = func_to_ccid(f);
  467. struct usb_ep *ep;
  468. struct usb_composite_dev *cdev = c->cdev;
  469. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  470. struct usb_request *req;
  471. int ret = -ENODEV;
  472. int i;
  473. ccid_dev->ifc_id = usb_interface_id(c, f);
  474. if (ccid_dev->ifc_id < 0) {
  475. pr_err("%s: unable to allocate ifc id, err:%d\n",
  476. __func__, ccid_dev->ifc_id);
  477. return ccid_dev->ifc_id;
  478. }
  479. ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
  480. ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
  481. if (!ep) {
  482. pr_err("%s: usb epnotify autoconfig failed\n", __func__);
  483. return -ENODEV;
  484. }
  485. ccid_dev->notify = ep;
  486. ep->driver_data = cdev;
  487. ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
  488. if (!ep) {
  489. pr_err("%s: usb epin autoconfig failed\n", __func__);
  490. ret = -ENODEV;
  491. goto ep_auto_in_fail;
  492. }
  493. ccid_dev->in = ep;
  494. ep->driver_data = cdev;
  495. ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
  496. if (!ep) {
  497. pr_err("%s: usb epout autoconfig failed\n", __func__);
  498. ret = -ENODEV;
  499. goto ep_auto_out_fail;
  500. }
  501. ccid_dev->out = ep;
  502. ep->driver_data = cdev;
  503. /*
  504. * support all relevant hardware speeds... we expect that when
  505. * hardware is dual speed, all bulk-capable endpoints work at
  506. * both speeds
  507. */
  508. ccid_hs_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
  509. ccid_hs_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
  510. ccid_hs_notify_desc.bEndpointAddress =
  511. ccid_fs_notify_desc.bEndpointAddress;
  512. ccid_ss_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
  513. ccid_ss_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
  514. ccid_ss_notify_desc.bEndpointAddress =
  515. ccid_fs_notify_desc.bEndpointAddress;
  516. ret = usb_assign_descriptors(f, ccid_fs_descs, ccid_hs_descs,
  517. ccid_ss_descs, ccid_ss_descs);
  518. if (ret)
  519. goto assign_desc_fail;
  520. pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
  521. gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
  522. ccid_dev->in->name, ccid_dev->out->name);
  523. ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
  524. sizeof(struct usb_ccid_notification), GFP_KERNEL);
  525. if (IS_ERR(ccid_dev->notify_req)) {
  526. pr_err("%s: unable to allocate memory for notify req\n",
  527. __func__);
  528. goto notify_alloc_fail;
  529. }
  530. ccid_dev->notify_req->complete = ccid_notify_complete;
  531. ccid_dev->notify_req->context = ccid_dev;
  532. /* now allocate requests for our endpoints */
  533. req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE,
  534. GFP_KERNEL);
  535. if (IS_ERR(req)) {
  536. pr_err("%s: unable to allocate memory for out req\n",
  537. __func__);
  538. ret = PTR_ERR(req);
  539. goto out_alloc_fail;
  540. }
  541. req->complete = ccid_bulk_complete_out;
  542. req->context = ccid_dev;
  543. bulk_dev->rx_req = req;
  544. for (i = 0; i < TX_REQ_MAX; i++) {
  545. req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE,
  546. GFP_KERNEL);
  547. if (IS_ERR(req)) {
  548. pr_err("%s: unable to allocate memory for in req\n",
  549. __func__);
  550. ret = PTR_ERR(req);
  551. goto in_alloc_fail;
  552. }
  553. req->complete = ccid_bulk_complete_in;
  554. req->context = ccid_dev;
  555. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  556. }
  557. return 0;
  558. in_alloc_fail:
  559. ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
  560. out_alloc_fail:
  561. ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
  562. notify_alloc_fail:
  563. usb_free_all_descriptors(f);
  564. assign_desc_fail:
  565. ccid_dev->out->driver_data = NULL;
  566. ccid_dev->out = NULL;
  567. ep_auto_out_fail:
  568. ccid_dev->in->driver_data = NULL;
  569. ccid_dev->in = NULL;
  570. ep_auto_in_fail:
  571. ccid_dev->notify->driver_data = NULL;
  572. ccid_dev->notify = NULL;
  573. return ret;
  574. }
  575. static int ccid_bulk_open(struct inode *inode, struct file *fp)
  576. {
  577. struct ccid_bulk_dev *bulk_dev = container_of(inode->i_cdev,
  578. struct ccid_bulk_dev, cdev);
  579. struct f_ccid *ccid_dev = bulk_dev_to_ccid(bulk_dev);
  580. unsigned long flags;
  581. if (!atomic_read(&ccid_dev->online)) {
  582. pr_debug("%s: USB cable not connected\n", __func__);
  583. return -ENODEV;
  584. }
  585. if (atomic_read(&bulk_dev->opened)) {
  586. pr_debug("%s: bulk device is already opened\n", __func__);
  587. return -EBUSY;
  588. }
  589. atomic_set(&bulk_dev->opened, 1);
  590. /* clear the error latch */
  591. atomic_set(&bulk_dev->error, 0);
  592. spin_lock_irqsave(&ccid_dev->lock, flags);
  593. fp->private_data = ccid_dev;
  594. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  595. return 0;
  596. }
  597. static int ccid_bulk_release(struct inode *ip, struct file *fp)
  598. {
  599. struct f_ccid *ccid_dev = fp->private_data;
  600. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  601. atomic_set(&bulk_dev->opened, 0);
  602. return 0;
  603. }
/*
 * read() for the bulk char device: queue the single OUT request, sleep
 * until it completes (or an error / disconnect occurs), then copy the
 * received bytes to userspace. Zero-length packets are silently requeued.
 * Returns bytes read, or a negative errno.
 */
static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	struct f_ccid *ccid_dev = fp->private_data;
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct usb_request *req;
	int r = count, xfer, len;
	int ret;
	unsigned long flags;

	pr_debug("%s: %zu bytes\n", __func__, count);

	if (count > BULK_OUT_BUFFER_SIZE) {
		pr_err("%s: max_buffer_size:%d given_pkt_size:%zu\n",
				__func__, BULK_OUT_BUFFER_SIZE, count);
		return -ENOMEM;
	}

	if (atomic_read(&bulk_dev->error)) {
		r = -EIO;
		pr_err("%s bulk_dev_error\n", __func__);
		goto done;
	}

	/* OUT transfer lengths must be a multiple of the ep packet size */
	len = ALIGN(count, ccid_dev->out->maxpacket);

requeue_req:
	spin_lock_irqsave(&ccid_dev->lock, flags);
	if (!atomic_read(&ccid_dev->online)) {
		spin_unlock_irqrestore(&ccid_dev->lock, flags);
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	/* queue a request */
	req = bulk_dev->rx_req;
	req->length = len;
	bulk_dev->rx_done = 0;
	spin_unlock_irqrestore(&ccid_dev->lock, flags);
	ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		pr_err("%s usb ep queue failed\n", __func__);
		atomic_set(&bulk_dev->error, 1);
		goto done;
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
					atomic_read(&bulk_dev->error) ||
					!atomic_read(&ccid_dev->online));
	if (ret < 0) {
		/* interrupted by a signal: cancel the in-flight request */
		atomic_set(&bulk_dev->error, 1);
		r = ret;
		usb_ep_dequeue(ccid_dev->out, req);
		goto done;
	}

	if (!atomic_read(&bulk_dev->error)) {
		spin_lock_irqsave(&ccid_dev->lock, flags);
		if (!atomic_read(&ccid_dev->online)) {
			spin_unlock_irqrestore(&ccid_dev->lock, flags);
			pr_debug("%s: USB cable not connected\n", __func__);
			r = -ENODEV;
			goto done;
		}
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0) {
			spin_unlock_irqrestore(&ccid_dev->lock, flags);
			goto requeue_req;
		}
		if (req->actual > count)
			pr_err("%s More data received(%d) than required(%zu)\n",
					__func__, req->actual, count);
		xfer = (req->actual < count) ? req->actual : count;

		/*
		 * rx_req_busy marks the buffer in use across the unlocked
		 * copy_to_user(); unbind skips freeing rx_req while set.
		 */
		atomic_set(&bulk_dev->rx_req_busy, 1);
		spin_unlock_irqrestore(&ccid_dev->lock, flags);

		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;

		spin_lock_irqsave(&ccid_dev->lock, flags);
		atomic_set(&bulk_dev->rx_req_busy, 0);
		if (!atomic_read(&ccid_dev->online)) {
			/* unbind deferred the free to us; do it now */
			ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
			spin_unlock_irqrestore(&ccid_dev->lock, flags);
			pr_debug("%s: USB cable not connected\n", __func__);
			r = -ENODEV;
			goto done;
		} else {
			r = xfer;
		}
		spin_unlock_irqrestore(&ccid_dev->lock, flags);
	} else {
		r = -EIO;
	}
done:
	pr_debug("%s returning %d\n", __func__, r);
	return r;
}
  694. static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
  695. size_t count, loff_t *pos)
  696. {
  697. struct f_ccid *ccid_dev = fp->private_data;
  698. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  699. struct usb_request *req = 0;
  700. int r = count;
  701. int ret;
  702. unsigned long flags;
  703. pr_debug("%s: %zu bytes\n", __func__, count);
  704. if (!atomic_read(&ccid_dev->online)) {
  705. pr_debug("%s: USB cable not connected\n", __func__);
  706. return -ENODEV;
  707. }
  708. if (!count) {
  709. pr_err("%s: zero length ctrl pkt\n", __func__);
  710. return -ENODEV;
  711. }
  712. if (count > BULK_IN_BUFFER_SIZE) {
  713. pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
  714. __func__, BULK_IN_BUFFER_SIZE, count);
  715. return -ENOMEM;
  716. }
  717. /* get an idle tx request to use */
  718. ret = wait_event_interruptible(bulk_dev->write_wq,
  719. ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
  720. atomic_read(&bulk_dev->error)));
  721. if (ret < 0) {
  722. r = ret;
  723. goto done;
  724. }
  725. if (!req || atomic_read(&bulk_dev->error)) {
  726. pr_err(" %s dev->error\n", __func__);
  727. r = -EIO;
  728. goto done;
  729. }
  730. if (copy_from_user(req->buf, buf, count)) {
  731. if (!atomic_read(&ccid_dev->online)) {
  732. pr_debug("%s: USB cable not connected\n",
  733. __func__);
  734. ccid_request_free(req, ccid_dev->in);
  735. r = -ENODEV;
  736. } else {
  737. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  738. r = -EFAULT;
  739. }
  740. goto done;
  741. }
  742. req->length = count;
  743. ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
  744. if (ret < 0) {
  745. pr_debug("%s: xfer error %d\n", __func__, ret);
  746. atomic_set(&bulk_dev->error, 1);
  747. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  748. r = -EIO;
  749. spin_lock_irqsave(&ccid_dev->lock, flags);
  750. if (!atomic_read(&ccid_dev->online)) {
  751. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  752. pr_debug("%s: USB cable not connected\n",
  753. __func__);
  754. while ((req = ccid_req_get(ccid_dev,
  755. &bulk_dev->tx_idle)))
  756. ccid_request_free(req, ccid_dev->in);
  757. r = -ENODEV;
  758. }
  759. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  760. goto done;
  761. }
  762. done:
  763. pr_debug("%s returning %d\n", __func__, r);
  764. return r;
  765. }
/* File operations for the bulk (data) character device node. */
static const struct file_operations ccid_bulk_fops = {
	.owner = THIS_MODULE,
	.read = ccid_bulk_read,
	.write = ccid_bulk_write,
	.open = ccid_bulk_open,
	.release = ccid_bulk_release,
};
  773. static int ccid_ctrl_open(struct inode *inode, struct file *fp)
  774. {
  775. struct ccid_ctrl_dev *ctrl_dev = container_of(inode->i_cdev,
  776. struct ccid_ctrl_dev, cdev);
  777. struct f_ccid *ccid_dev = ctrl_dev_to_ccid(ctrl_dev);
  778. unsigned long flags;
  779. if (!atomic_read(&ccid_dev->online)) {
  780. pr_debug("%s: USB cable not connected\n", __func__);
  781. return -ENODEV;
  782. }
  783. if (atomic_read(&ctrl_dev->opened)) {
  784. pr_debug("%s: ctrl device is already opened\n", __func__);
  785. return -EBUSY;
  786. }
  787. atomic_set(&ctrl_dev->opened, 1);
  788. spin_lock_irqsave(&ccid_dev->lock, flags);
  789. fp->private_data = ccid_dev;
  790. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  791. return 0;
  792. }
  793. static int ccid_ctrl_release(struct inode *inode, struct file *fp)
  794. {
  795. struct f_ccid *ccid_dev = fp->private_data;
  796. struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
  797. atomic_set(&ctrl_dev->opened, 0);
  798. return 0;
  799. }
/*
 * read() handler for the control device.
 *
 * Sleeps until tx_ctrl_done is signalled (a control transfer completed)
 * or the cable is unplugged, then copies up to CTRL_BUF_SIZE bytes of
 * ctrl_dev->buf to userspace.
 *
 * Returns the number of bytes copied, -ENODEV if the cable is not (or no
 * longer) connected, -ERESTARTSYS if the wait was interrupted, or -EFAULT
 * on a failed copy.
 *
 * NOTE(review): always copies `count` bytes (clamped to CTRL_BUF_SIZE)
 * regardless of how much data the last transfer actually produced —
 * presumably the producer fills the buffer appropriately; verify against
 * the setup/complete handlers.
 */
static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
		 size_t count, loff_t *ppos)
{
	struct f_ccid *ccid_dev = fp->private_data;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	int ret = 0;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	/* Never expose more than the size of the backing buffer. */
	if (count > CTRL_BUF_SIZE)
		count = CTRL_BUF_SIZE;

	ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
					 ctrl_dev->tx_ctrl_done ||
					!atomic_read(&ccid_dev->online));
	if (ret < 0)
		return ret;
	/* Consume the completion flag before re-checking the link state. */
	ctrl_dev->tx_ctrl_done = 0;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	ret = copy_to_user(buf, ctrl_dev->buf, count);
	if (ret)
		return -EFAULT;

	return count;
}
  827. static long
  828. ccid_ctrl_ioctl(struct file *fp, unsigned int cmd, u_long arg)
  829. {
  830. struct f_ccid *ccid_dev = fp->private_data;
  831. struct usb_request *req = ccid_dev->notify_req;
  832. struct usb_ccid_notification *ccid_notify = req->buf;
  833. void __user *argp = (void __user *)arg;
  834. int ret = 0;
  835. switch (cmd) {
  836. case CCID_NOTIFY_CARD:
  837. if (copy_from_user(ccid_notify, argp,
  838. sizeof(struct usb_ccid_notification)))
  839. return -EFAULT;
  840. req->length = 2;
  841. break;
  842. case CCID_NOTIFY_HWERROR:
  843. if (copy_from_user(ccid_notify, argp,
  844. sizeof(struct usb_ccid_notification)))
  845. return -EFAULT;
  846. req->length = 4;
  847. break;
  848. case CCID_READ_DTR:
  849. if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int)))
  850. return -EFAULT;
  851. return 0;
  852. }
  853. ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
  854. if (ret < 0) {
  855. pr_err("ccid notify ep enqueue error %d\n", ret);
  856. return ret;
  857. }
  858. return 0;
  859. }
/* File operations for the control character device node. */
static const struct file_operations ccid_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = ccid_ctrl_open,
	.release = ccid_ctrl_release,
	.read = ccid_ctrl_read,
	.unlocked_ioctl = ccid_ctrl_ioctl,
};
  867. static int ccid_cdev_init(struct cdev *cdev, const struct file_operations *fops,
  868. const char *name)
  869. {
  870. struct device *dev;
  871. int ret, minor;
  872. minor = ida_simple_get(&ccid_ida, 0, MAX_INSTANCES, GFP_KERNEL);
  873. if (minor < 0) {
  874. pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
  875. minor);
  876. return minor;
  877. }
  878. cdev_init(cdev, fops);
  879. ret = cdev_add(cdev, MKDEV(major, minor), 1);
  880. if (ret) {
  881. pr_err("Failed to add cdev for (%s)\n", name);
  882. goto err_cdev_add;
  883. }
  884. dev = device_create(ccid_class, NULL, MKDEV(major, minor), NULL, name);
  885. if (IS_ERR(dev)) {
  886. ret = PTR_ERR(dev);
  887. goto err_create_dev;
  888. }
  889. return 0;
  890. err_create_dev:
  891. cdev_del(cdev);
  892. err_cdev_add:
  893. ida_simple_remove(&ccid_ida, minor);
  894. return ret;
  895. }
  896. static void ccid_cdev_free(struct cdev *cdev)
  897. {
  898. int minor = MINOR(cdev->dev);
  899. device_destroy(ccid_class, cdev->dev);
  900. cdev_del(cdev);
  901. ida_simple_remove(&ccid_ida, minor);
  902. }
/*
 * free_func callback: intentionally empty — the f_ccid allocation is
 * owned and freed by the function instance (ccid_free_inst), not here.
 */
static void ccid_free_func(struct usb_function *f)
{ }
  905. static int ccid_bind_config(struct f_ccid *ccid_dev)
  906. {
  907. ccid_dev->function.name = FUNCTION_NAME;
  908. ccid_dev->function.bind = ccid_function_bind;
  909. ccid_dev->function.unbind = ccid_function_unbind;
  910. ccid_dev->function.set_alt = ccid_function_set_alt;
  911. ccid_dev->function.setup = ccid_function_setup;
  912. ccid_dev->function.disable = ccid_function_disable;
  913. ccid_dev->function.free_func = ccid_free_func;
  914. return 0;
  915. }
  916. static int ccid_alloc_chrdev_region(void)
  917. {
  918. int ret;
  919. dev_t dev;
  920. ccid_class = class_create(THIS_MODULE, "ccid_usb");
  921. if (IS_ERR(ccid_class)) {
  922. ret = PTR_ERR(ccid_class);
  923. ccid_class = NULL;
  924. pr_err("%s: class_create() failed:%d\n", __func__, ret);
  925. return ret;
  926. }
  927. ret = alloc_chrdev_region(&dev, 0, MAX_INSTANCES, "ccid_usb");
  928. if (ret) {
  929. pr_err("%s: alloc_chrdev_region() failed:%d\n", __func__, ret);
  930. class_destroy(ccid_class);
  931. ccid_class = NULL;
  932. return ret;
  933. }
  934. major = MAJOR(dev);
  935. return 0;
  936. }
  937. static void ccid_free_chrdev_region(void)
  938. {
  939. mutex_lock(&ccid_ida_lock);
  940. if (ida_is_empty(&ccid_ida)) {
  941. if (major) {
  942. unregister_chrdev_region(MKDEV(major, 0),
  943. MAX_INSTANCES);
  944. major = 0;
  945. }
  946. if (ccid_class) {
  947. class_destroy(ccid_class);
  948. ccid_class = NULL;
  949. }
  950. }
  951. mutex_unlock(&ccid_ida_lock);
  952. }
/*
 * Allocate and initialize one f_ccid instance: locks, wait queues and
 * request lists, the shared chrdev region (first instance only), and the
 * ctrl + bulk character devices.
 *
 * Returns the new instance or an ERR_PTR on failure. The unwind labels
 * must stay in this exact order: bulk init failure frees the ctrl cdev,
 * which in turn allows ccid_free_chrdev_region() to drop the region once
 * the ida is empty again.
 */
static struct f_ccid *ccid_setup(void)
{
	struct f_ccid *ccid_dev;
	int ret;

	ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
	if (!ccid_dev) {
		ret = -ENOMEM;
		goto error;
	}

	spin_lock_init(&ccid_dev->lock);
	INIT_LIST_HEAD(&ccid_dev->ctrl_dev.tx_q);
	init_waitqueue_head(&ccid_dev->ctrl_dev.tx_wait_q);
	init_waitqueue_head(&ccid_dev->bulk_dev.read_wq);
	init_waitqueue_head(&ccid_dev->bulk_dev.write_wq);
	INIT_LIST_HEAD(&ccid_dev->bulk_dev.tx_idle);

	/* First instance creates the class and chrdev region. */
	mutex_lock(&ccid_ida_lock);
	if (ida_is_empty(&ccid_ida)) {
		ret = ccid_alloc_chrdev_region();
		if (ret) {
			mutex_unlock(&ccid_ida_lock);
			goto err_chrdev;
		}
	}
	mutex_unlock(&ccid_ida_lock);

	ret = ccid_cdev_init(&ccid_dev->ctrl_dev.cdev, &ccid_ctrl_fops,
			CCID_CTRL_DEV_NAME);
	if (ret) {
		pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
				__func__, ret);
		goto err_ctrl_init;
	}
	ret = ccid_cdev_init(&ccid_dev->bulk_dev.cdev, &ccid_bulk_fops,
			CCID_BULK_DEV_NAME);
	if (ret) {
		pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
				__func__, ret);
		goto err_bulk_init;
	}

	return ccid_dev;

err_bulk_init:
	ccid_cdev_free(&ccid_dev->ctrl_dev.cdev);
err_ctrl_init:
	/* Drops the region/class only if this was the last/only instance. */
	ccid_free_chrdev_region();
err_chrdev:
	kfree(ccid_dev);
error:
	pr_err("ccid gadget driver failed to initialize\n");
	return ERR_PTR(ret);
}
/* Map a configfs item back to its enclosing ccid_opts. */
static inline struct ccid_opts *to_ccid_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct ccid_opts,
			func_inst.group);
}
/* configfs item release: drop the reference on the function instance. */
static void ccid_attr_release(struct config_item *item)
{
	struct ccid_opts *opts = to_ccid_opts(item);

	usb_put_function_instance(&opts->func_inst);
}
/* configfs item operations for the ccid function group. */
static struct configfs_item_operations ccid_item_ops = {
	.release = ccid_attr_release,
};
/* configfs item type describing a ccid function instance directory. */
static struct config_item_type ccid_func_type = {
	.ct_item_ops = &ccid_item_ops,
	.ct_owner = THIS_MODULE,
};
  1019. static int ccid_set_inst_name(struct usb_function_instance *fi,
  1020. const char *name)
  1021. {
  1022. int name_len;
  1023. struct f_ccid *ccid;
  1024. struct ccid_opts *opts = container_of(fi, struct ccid_opts, func_inst);
  1025. name_len = strlen(name) + 1;
  1026. if (name_len > MAX_INST_NAME_LEN)
  1027. return -ENAMETOOLONG;
  1028. ccid = ccid_setup();
  1029. if (IS_ERR(ccid))
  1030. return PTR_ERR(ccid);
  1031. opts->ccid = ccid;
  1032. return 0;
  1033. }
  1034. static void ccid_free_inst(struct usb_function_instance *f)
  1035. {
  1036. struct ccid_opts *opts = container_of(f, struct ccid_opts, func_inst);
  1037. if (!opts->ccid)
  1038. return;
  1039. ccid_cdev_free(&opts->ccid->ctrl_dev.cdev);
  1040. ccid_cdev_free(&opts->ccid->bulk_dev.cdev);
  1041. ccid_free_chrdev_region();
  1042. kfree(opts->ccid);
  1043. kfree(opts);
  1044. }
  1045. static struct usb_function_instance *ccid_alloc_inst(void)
  1046. {
  1047. struct ccid_opts *opts;
  1048. opts = kzalloc(sizeof(*opts), GFP_KERNEL);
  1049. if (!opts)
  1050. return ERR_PTR(-ENOMEM);
  1051. opts->func_inst.set_inst_name = ccid_set_inst_name;
  1052. opts->func_inst.free_func_inst = ccid_free_inst;
  1053. config_group_init_type_name(&opts->func_inst.group, "",
  1054. &ccid_func_type);
  1055. return &opts->func_inst;
  1056. }
  1057. static struct usb_function *ccid_alloc(struct usb_function_instance *fi)
  1058. {
  1059. struct ccid_opts *opts;
  1060. int ret;
  1061. opts = container_of(fi, struct ccid_opts, func_inst);
  1062. ret = ccid_bind_config(opts->ccid);
  1063. if (ret)
  1064. return ERR_PTR(ret);
  1065. return &opts->ccid->function;
  1066. }
/* Register the "ccid" function with the gadget configfs framework. */
DECLARE_USB_FUNCTION_INIT(ccid, ccid_alloc_inst, ccid_alloc);
MODULE_DESCRIPTION("USB CCID function Driver");
MODULE_LICENSE("GPL");