// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <asm/unaligned.h>

#include "arpc.h"
#include "greybus_trace.h"

/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT	500

/* Default timeout for ARPC CPort requests */
#define ES2_ARPC_CPORT_TIMEOUT	500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX	U16_MAX
#define ARPC_IN_SIZE_MAX	128

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE		SZ_16K

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB		2

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/**
 * struct es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are attached to.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags indicating whether the corresponding
 *	@cport_out_urb is busy
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *	corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true if cport CDSI1 is in use
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to each ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};

struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
	bool active;
};
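
/*
 * The Greybus core allocates the driver-private area whose size is given by
 * es2_driver.hd_priv_size (sizeof(struct es2_ap_dev)) at the end of the
 * gb_host_device, so hd_to_es2() can simply cast &hd->hd_priv.
 */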

static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);

static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}

static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}
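
/*
 * Asynchronous variant of output_sync(): the setup packet and the request
 * payload are carved out of a single kmalloc() allocation so that
 * ap_urb_complete() can release both with one kfree() once the control
 * transfer finishes.
 */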

static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}

static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}

static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}

static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (!es2->cport_out_urb_busy[i] &&
		    !es2->cport_out_urb_cancelled[i]) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;

	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}
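
/*
 * Because only pad[0] (a single byte) carries the CPort id on the wire, the
 * id must fit in one byte; apb_get_cport_count() below limits the advertised
 * CPort count to U8_MAX for the same reason.
 */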

/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent. Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}
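
/*
 * Cancelling an in-flight message has to handle both kinds of urb handed out
 * by next_free_urb(): a pre-allocated pool urb is flagged as cancelled under
 * the lock so it cannot be handed out again while usb_kill_urb() runs, and a
 * dynamically allocated urb gets an extra reference so the completion path
 * cannot free it underneath us.
 */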

/*
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}

static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret < 0) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		goto out;
	}

	ret = 0;
out:
	kfree(req);
	return ret;
}
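
/*
 * The CPort state helpers below (connected, flush, shutdown, quiesce, clear)
 * are thin wrappers around arpc_sync(): each one packs its arguments into the
 * corresponding little-endian request structure from arpc.h and waits for the
 * bridge to acknowledge the operation.
 */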

static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static struct gb_hd_driver es2_driver = {
	.hd_priv_size = sizeof(struct es2_ap_dev),
	.message_send = message_send,
	.message_cancel = message_cancel,
	.cport_allocate = es2_cport_allocate,
	.cport_release = es2_cport_release,
	.cport_enable = cport_enable,
	.cport_connected = es2_cport_connected,
	.cport_flush = es2_cport_flush,
	.cport_shutdown = es2_cport_shutdown,
	.cport_quiesce = es2_cport_quiesce,
	.cport_clear = es2_cport_clear,
	.latency_tag_enable = latency_tag_enable,
	.latency_tag_disable = latency_tag_disable,
	.output = output,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	udev = es2->usb_dev;
	gb_hd_put(es2->hd);
	usb_put_dev(udev);
}
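
/*
 * check_urb_status() returns 0 for a completed transfer, the original
 * negative status when the urb was unlinked or the device is gone, and
 * -EAGAIN for anything it does not recognize; the IN-urb completion handlers
 * use that result to decide whether to resubmit the urb or drop it.
 */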

static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}
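
/*
 * Completion handler for CPort OUT urbs: detach the urb from the message
 * under the lock (so a concurrent message_cancel() cannot see a stale
 * pointer), report the send status to the Greybus core, and return the urb
 * to the pool.
 */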

static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->active = true;
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	if (rpc->active) {
		rpc->active = false;
		list_del(&rpc->list);
	}
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		return retval;
	}

	return 0;
}
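
/*
 * Synchronous ARPC round trip: the request is pushed to the bridge over
 * control endpoint 0 (GB_APB_REQUEST_ARPC_RUN), while the response arrives
 * on the dedicated bulk ARPC IN endpoint and is matched by request id in
 * arpc_in_callback(), which completes @response_received.
 */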

static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}

static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	arpc_del(es2, rpc);
	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}
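
/*
 * APBridge log support: a kthread periodically drains the bridge's log over
 * a vendor control request (GB_APB_REQUEST_LOG) into a kfifo, which is then
 * exposed read-only through the "apb_log" debugfs file; "apb_log_enable"
 * starts and stops the polling thread.
 */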

#define APB1_LOG_MSG_SIZE	64
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	do {
		retval = usb_control_msg(es2->usb_dev,
					 usb_rcvctrlpipe(es2->usb_dev, 0),
					 GB_APB_REQUEST_LOG,
					 USB_DIR_IN | USB_TYPE_VENDOR |
					 USB_RECIP_INTERFACE,
					 0x00, 0x00,
					 buf,
					 APB1_LOG_MSG_SIZE,
					 ES2_USB_CTRL_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

static ssize_t apb_log_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
	kfree(tmp_buf);

	return ret;
}

static const struct file_operations apb_log_fops = {
	.read = apb_log_read,
};

static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}

static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2);
}

static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = file_inode(f)->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

static const struct file_operations apb_log_enable_fops = {
	.read = apb_log_enable_read,
	.write = apb_log_enable_write,
};

static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

/*
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		usb_put_dev(udev);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}

	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);

	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}

static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}
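
/*
 * Note: soft_unbind is set so the USB core does not kill urbs or disable
 * endpoints before calling ap_disconnect(); this should let the host device
 * teardown in gb_hd_del() still exchange traffic with the bridge.
 */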

static struct usb_driver es2_ap_driver = {
	.name = "es2_ap_driver",
	.probe = ap_probe,
	.disconnect = ap_disconnect,
	.id_table = id_table,
	.soft_unbind = 1,
};

module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");