f_fs.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * f_fs.c -- user mode file system API for USB composite function controllers
  4. *
  5. * Copyright (C) 2010 Samsung Electronics
  6. * Author: Michal Nazarewicz <[email protected]>
  7. *
  8. * Based on inode.c (GadgetFS) which was:
  9. * Copyright (C) 2003-2004 David Brownell
  10. * Copyright (C) 2003 Agilent Technologies
  11. */
  12. /* #define DEBUG */
  13. /* #define VERBOSE_DEBUG */
  14. #include <linux/blkdev.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/export.h>
  17. #include <linux/fs_parser.h>
  18. #include <linux/hid.h>
  19. #include <linux/mm.h>
  20. #include <linux/module.h>
  21. #include <linux/scatterlist.h>
  22. #include <linux/sched/signal.h>
  23. #include <linux/uio.h>
  24. #include <linux/vmalloc.h>
  25. #include <asm/unaligned.h>
  26. #include <linux/usb/ccid.h>
  27. #include <linux/usb/composite.h>
  28. #include <linux/usb/functionfs.h>
  29. #include <linux/aio.h>
  30. #include <linux/kthread.h>
  31. #include <linux/poll.h>
  32. #include <linux/eventfd.h>
  33. #include "u_fs.h"
  34. #include "u_f.h"
  35. #include "u_os_desc.h"
  36. #include "configfs.h"
  37. #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
  38. /* Reference counter handling */
  39. static void ffs_data_get(struct ffs_data *ffs);
  40. static void ffs_data_put(struct ffs_data *ffs);
  41. /* Creates new ffs_data object. */
  42. static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
  43. __attribute__((malloc));
  44. /* Opened counter handling. */
  45. static void ffs_data_opened(struct ffs_data *ffs);
  46. static void ffs_data_closed(struct ffs_data *ffs);
  47. /* Called with ffs->mutex held; take over ownership of data. */
  48. static int __must_check
  49. __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
  50. static int __must_check
  51. __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
  52. /* The function structure ***************************************************/
  53. struct ffs_ep;
  54. struct ffs_function {
  55. struct usb_configuration *conf;
  56. struct usb_gadget *gadget;
  57. struct ffs_data *ffs;
  58. struct ffs_ep *eps;
  59. u8 eps_revmap[16];
  60. short *interfaces_nums;
  61. struct usb_function function;
  62. };
  63. static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
  64. {
  65. return container_of(f, struct ffs_function, function);
  66. }
  67. static inline enum ffs_setup_state
  68. ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
  69. {
  70. return (enum ffs_setup_state)
  71. cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
  72. }
  73. static void ffs_func_eps_disable(struct ffs_function *func);
  74. static int __must_check ffs_func_eps_enable(struct ffs_function *func);
  75. static int ffs_func_bind(struct usb_configuration *,
  76. struct usb_function *);
  77. static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
  78. static void ffs_func_disable(struct usb_function *);
  79. static int ffs_func_setup(struct usb_function *,
  80. const struct usb_ctrlrequest *);
  81. static bool ffs_func_req_match(struct usb_function *,
  82. const struct usb_ctrlrequest *,
  83. bool config0);
  84. static void ffs_func_suspend(struct usb_function *);
  85. static void ffs_func_resume(struct usb_function *);
  86. static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
  87. static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
  88. /* The endpoints structures *************************************************/
  89. struct ffs_ep {
  90. struct usb_ep *ep; /* P: ffs->eps_lock */
  91. struct usb_request *req; /* P: epfile->mutex */
  92. /* [0]: full speed, [1]: high speed, [2]: super speed */
  93. struct usb_endpoint_descriptor *descs[3];
  94. u8 num;
  95. };
  96. struct ffs_epfile {
  97. /* Protects ep->ep and ep->req. */
  98. struct mutex mutex;
  99. struct ffs_data *ffs;
  100. struct ffs_ep *ep; /* P: ffs->eps_lock */
  101. struct dentry *dentry;
  102. /*
  103. * Buffer for holding data from partial reads which may happen since
  104. * we’re rounding user read requests to a multiple of a max packet size.
  105. *
  106. * The pointer is initialised with NULL value and may be set by
  107. * __ffs_epfile_read_data function to point to a temporary buffer.
  108. *
  109. * In normal operation, calls to __ffs_epfile_read_buffered will consume
  110. * data from said buffer and eventually free it. Importantly, while the
  111. * function is using the buffer, it sets the pointer to NULL. This is
  112. * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
  113. * can never run concurrently (they are synchronised by epfile->mutex)
  114. * so the latter will not assign a new value to the pointer.
  115. *
  116. * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
  117. * valid) and sets the pointer to the READ_BUFFER_DROP value. This special
  118. * value is the crux of the synchronisation between ffs_func_eps_disable and
  119. * __ffs_epfile_read_data.
  120. *
  121. * Once __ffs_epfile_read_data is about to finish it will try to set the
  122. * pointer back to its old value (as described above), but seeing as the
  123. * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
  124. * the buffer.
  125. *
  126. * == State transitions ==
  127. *
  128. * • ptr == NULL: (initial state)
  129. * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
  130. * ◦ __ffs_epfile_read_buffered: nop
  131. * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
  132. * ◦ reading finishes: n/a, not in ‘and reading’ state
  133. * • ptr == DROP:
  134. * ◦ __ffs_epfile_read_buffer_free: nop
  135. * ◦ __ffs_epfile_read_buffered: go to ptr == NULL
  136. * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
  137. * ◦ reading finishes: n/a, not in ‘and reading’ state
  138. * • ptr == buf:
  139. * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
  140. * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
  141. * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
  142. * is always called first
  143. * ◦ reading finishes: n/a, not in ‘and reading’ state
  144. * • ptr == NULL and reading:
  145. * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
  146. * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
  147. * ◦ __ffs_epfile_read_data: n/a, mutex is held
  148. * ◦ reading finishes and …
  149. * … all data read: free buf, go to ptr == NULL
  150. * … otherwise: go to ptr == buf and reading
  151. * • ptr == DROP and reading:
  152. * ◦ __ffs_epfile_read_buffer_free: nop
  153. * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
  154. * ◦ __ffs_epfile_read_data: n/a, mutex is held
  155. * ◦ reading finishes: free buf, go to ptr == DROP
  156. */
  157. struct ffs_buffer *read_buffer;
  158. #define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
  159. char name[5];
  160. unsigned char in; /* P: ffs->eps_lock */
  161. unsigned char isoc; /* P: ffs->eps_lock */
  162. unsigned char _pad;
  163. };
  164. struct ffs_buffer {
  165. size_t length;
  166. char *data;
  167. char storage[];
  168. };
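The read_buffer hand-off described in the comment inside struct ffs_epfile above boils down to a small lock-free pattern: the consumer takes the pointer with an atomic exchange and only parks a leftover buffer back if the slot is still NULL, while the disable path swaps in a DROP marker so a concurrent put frees the buffer instead of parking it. A minimal sketch of that pattern, using C11 atomics in place of the kernel's xchg()/cmpxchg(); the buffer type, BUF_DROP and the two functions are illustrative placeholders, not driver code:

#include <stdatomic.h>
#include <stdlib.h>

struct buf { size_t length; char *data; };
#define BUF_DROP ((struct buf *)-1)             /* stands in for READ_BUFFER_DROP */

static _Atomic(struct buf *) read_buffer;       /* NULL, BUF_DROP, or a real buffer */

/* consumer side: take the buffer, use it, put a remainder back only if still NULL */
static void consume(void)
{
	struct buf *b = atomic_exchange(&read_buffer, NULL);
	struct buf *expected = NULL;

	if (!b || b == BUF_DROP)
		return;
	/* ... copy b->data out, possibly leaving a remainder in b ... */
	if (!atomic_compare_exchange_strong(&read_buffer, &expected, b))
		free(b);                        /* disable ran meanwhile: drop it */
}

/* disable side: mark the slot so a concurrent put frees instead of parking */
static void disable(void)
{
	struct buf *b = atomic_exchange(&read_buffer, BUF_DROP);

	if (b && b != BUF_DROP)
		free(b);
}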
  169. /* ffs_io_data structure ***************************************************/
  170. struct ffs_io_data {
  171. bool aio;
  172. bool read;
  173. struct kiocb *kiocb;
  174. struct iov_iter data;
  175. const void *to_free;
  176. char *buf;
  177. struct mm_struct *mm;
  178. struct work_struct work;
  179. struct usb_ep *ep;
  180. struct usb_request *req;
  181. struct sg_table sgt;
  182. bool use_sg;
  183. struct ffs_data *ffs;
  184. int status;
  185. struct completion done;
  186. };
  187. struct ffs_desc_helper {
  188. struct ffs_data *ffs;
  189. unsigned interfaces_count;
  190. unsigned eps_count;
  191. };
  192. static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
  193. static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
  194. static struct dentry *
  195. ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
  196. const struct file_operations *fops);
  197. /* Devices management *******************************************************/
  198. DEFINE_MUTEX(ffs_lock);
  199. EXPORT_SYMBOL_GPL(ffs_lock);
  200. static struct ffs_dev *_ffs_find_dev(const char *name);
  201. static struct ffs_dev *_ffs_alloc_dev(void);
  202. static void _ffs_free_dev(struct ffs_dev *dev);
  203. static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
  204. static void ffs_release_dev(struct ffs_dev *ffs_dev);
  205. static int ffs_ready(struct ffs_data *ffs);
  206. static void ffs_closed(struct ffs_data *ffs);
  207. /* Misc helper functions ****************************************************/
  208. static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
  209. __attribute__((warn_unused_result, nonnull));
  210. static char *ffs_prepare_buffer(const char __user *buf, size_t len)
  211. __attribute__((warn_unused_result, nonnull));
  212. /* Control file aka ep0 *****************************************************/
  213. static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
  214. {
  215. struct ffs_data *ffs = req->context;
  216. complete(&ffs->ep0req_completion);
  217. }
  218. static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
  219. __releases(&ffs->ev.waitq.lock)
  220. {
  221. struct usb_request *req = ffs->ep0req;
  222. int ret;
  223. if (!req) {
  224. spin_unlock_irq(&ffs->ev.waitq.lock);
  225. return -EINVAL;
  226. }
  227. req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
  228. spin_unlock_irq(&ffs->ev.waitq.lock);
  229. req->buf = data;
  230. req->length = len;
  231. /*
  232. * The UDC layer requires a buffer to be provided even for a ZLP, but
  233. * should not use it at all. Let's provide a poisoned pointer to catch
  234. * a possible bug in the driver.
  235. */
  236. if (req->buf == NULL)
  237. req->buf = (void *)0xDEADBABE;
  238. reinit_completion(&ffs->ep0req_completion);
  239. ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
  240. if (ret < 0)
  241. return ret;
  242. ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
  243. if (ret) {
  244. usb_ep_dequeue(ffs->gadget->ep0, req);
  245. return -EINTR;
  246. }
  247. ffs->setup_state = FFS_NO_SETUP;
  248. return req->status ? req->status : req->actual;
  249. }
  250. static int __ffs_ep0_stall(struct ffs_data *ffs)
  251. {
  252. if (ffs->ev.can_stall) {
  253. pr_vdebug("ep0 stall\n");
  254. usb_ep_set_halt(ffs->gadget->ep0);
  255. ffs->setup_state = FFS_NO_SETUP;
  256. return -EL2HLT;
  257. } else {
  258. pr_debug("bogus ep0 stall!\n");
  259. return -ESRCH;
  260. }
  261. }
  262. static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
  263. size_t len, loff_t *ptr)
  264. {
  265. struct ffs_data *ffs = file->private_data;
  266. ssize_t ret;
  267. char *data;
  268. ENTER();
  269. /* Fast check if setup was canceled */
  270. if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
  271. return -EIDRM;
  272. /* Acquire mutex */
  273. ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
  274. if (ret < 0)
  275. return ret;
  276. /* Check state */
  277. switch (ffs->state) {
  278. case FFS_READ_DESCRIPTORS:
  279. case FFS_READ_STRINGS:
  280. /* Copy data */
  281. if (len < 16) {
  282. ret = -EINVAL;
  283. break;
  284. }
  285. data = ffs_prepare_buffer(buf, len);
  286. if (IS_ERR(data)) {
  287. ret = PTR_ERR(data);
  288. break;
  289. }
  290. /* Handle data */
  291. if (ffs->state == FFS_READ_DESCRIPTORS) {
  292. pr_info("read descriptors\n");
  293. ret = __ffs_data_got_descs(ffs, data, len);
  294. if (ret < 0)
  295. break;
  296. ffs->state = FFS_READ_STRINGS;
  297. ret = len;
  298. } else {
  299. pr_info("read strings\n");
  300. ret = __ffs_data_got_strings(ffs, data, len);
  301. if (ret < 0)
  302. break;
  303. ret = ffs_epfiles_create(ffs);
  304. if (ret) {
  305. ffs->state = FFS_CLOSING;
  306. break;
  307. }
  308. ffs->state = FFS_ACTIVE;
  309. mutex_unlock(&ffs->mutex);
  310. ret = ffs_ready(ffs);
  311. if (ret < 0) {
  312. ffs->state = FFS_CLOSING;
  313. return ret;
  314. }
  315. return len;
  316. }
  317. break;
  318. case FFS_ACTIVE:
  319. data = NULL;
  320. /*
  321. * We're called from user space, so we can use _irq
  322. * rather than _irqsave
  323. */
  324. spin_lock_irq(&ffs->ev.waitq.lock);
  325. switch (ffs_setup_state_clear_cancelled(ffs)) {
  326. case FFS_SETUP_CANCELLED:
  327. ret = -EIDRM;
  328. goto done_spin;
  329. case FFS_NO_SETUP:
  330. ret = -ESRCH;
  331. goto done_spin;
  332. case FFS_SETUP_PENDING:
  333. break;
  334. }
  335. /* FFS_SETUP_PENDING */
  336. if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
  337. spin_unlock_irq(&ffs->ev.waitq.lock);
  338. ret = __ffs_ep0_stall(ffs);
  339. break;
  340. }
  341. /* FFS_SETUP_PENDING and not stall */
  342. len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
  343. spin_unlock_irq(&ffs->ev.waitq.lock);
  344. data = ffs_prepare_buffer(buf, len);
  345. if (IS_ERR(data)) {
  346. ret = PTR_ERR(data);
  347. break;
  348. }
  349. spin_lock_irq(&ffs->ev.waitq.lock);
  350. /*
  351. * We are guaranteed to be still in FFS_ACTIVE state
  352. * but the state of setup could have changed from
  353. * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
  354. * to check for that. If that happened we copied data
  355. * from user space in vain but it's unlikely.
  356. *
  357. * For sure we are not in FFS_NO_SETUP since this is
  358. * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
  359. * transition can be performed and it's protected by
  360. * mutex.
  361. */
  362. if (ffs_setup_state_clear_cancelled(ffs) ==
  363. FFS_SETUP_CANCELLED) {
  364. ret = -EIDRM;
  365. done_spin:
  366. spin_unlock_irq(&ffs->ev.waitq.lock);
  367. } else {
  368. /* unlocks spinlock */
  369. ret = __ffs_ep0_queue_wait(ffs, data, len);
  370. }
  371. kfree(data);
  372. break;
  373. default:
  374. ret = -EBADFD;
  375. break;
  376. }
  377. mutex_unlock(&ffs->mutex);
  378. return ret;
  379. }
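From user space, the FFS_READ_DESCRIPTORS → FFS_READ_STRINGS → FFS_ACTIVE progression handled above corresponds to two consecutive write(2) calls on the ep0 file. A rough sketch of that sequence; ffs_ep0_init is an illustrative helper, and the descriptor/strings blobs are assumed to be built by the caller in the usb_functionfs_descs_head_v2 / usb_functionfs_strings_head formats from include/uapi/linux/usb/functionfs.h:

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stddef.h>

static int ffs_ep0_init(const char *ep0_path,
			const void *descs, size_t descs_len,
			const void *strs,  size_t strs_len)
{
	int fd = open(ep0_path, O_RDWR);

	if (fd < 0)
		return -errno;
	/* 1st write: descriptors (FFS_READ_DESCRIPTORS -> FFS_READ_STRINGS) */
	if (write(fd, descs, descs_len) != (ssize_t)descs_len)
		goto err;
	/* 2nd write: strings (FFS_READ_STRINGS -> FFS_ACTIVE; ep files appear) */
	if (write(fd, strs, strs_len) != (ssize_t)strs_len)
		goto err;
	return fd;	/* keep it open: ep0 now delivers events and setup data */
err:
	close(fd);
	return -errno;
}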
  380. /* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
  381. static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
  382. size_t n)
  383. __releases(&ffs->ev.waitq.lock)
  384. {
  385. /*
  386. * n cannot be bigger than ffs->ev.count, which cannot be bigger than
  387. * size of ffs->ev.types array (which is four) so that's how much space
  388. * we reserve.
  389. */
  390. struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
  391. const size_t size = n * sizeof *events;
  392. unsigned i = 0;
  393. memset(events, 0, size);
  394. do {
  395. events[i].type = ffs->ev.types[i];
  396. if (events[i].type == FUNCTIONFS_SETUP) {
  397. events[i].u.setup = ffs->ev.setup;
  398. ffs->setup_state = FFS_SETUP_PENDING;
  399. }
  400. } while (++i < n);
  401. ffs->ev.count -= n;
  402. if (ffs->ev.count)
  403. memmove(ffs->ev.types, ffs->ev.types + n,
  404. ffs->ev.count * sizeof *ffs->ev.types);
  405. spin_unlock_irq(&ffs->ev.waitq.lock);
  406. mutex_unlock(&ffs->mutex);
  407. return copy_to_user(buf, events, size) ? -EFAULT : size;
  408. }
  409. static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
  410. size_t len, loff_t *ptr)
  411. {
  412. struct ffs_data *ffs = file->private_data;
  413. char *data = NULL;
  414. size_t n;
  415. int ret;
  416. ENTER();
  417. /* Fast check if setup was canceled */
  418. if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
  419. return -EIDRM;
  420. /* Acquire mutex */
  421. ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
  422. if (ret < 0)
  423. return ret;
  424. /* Check state */
  425. if (ffs->state != FFS_ACTIVE) {
  426. ret = -EBADFD;
  427. goto done_mutex;
  428. }
  429. /*
  430. * We're called from user space, so we can use _irq rather than
  431. * _irqsave
  432. */
  433. spin_lock_irq(&ffs->ev.waitq.lock);
  434. switch (ffs_setup_state_clear_cancelled(ffs)) {
  435. case FFS_SETUP_CANCELLED:
  436. ret = -EIDRM;
  437. break;
  438. case FFS_NO_SETUP:
  439. n = len / sizeof(struct usb_functionfs_event);
  440. if (!n) {
  441. ret = -EINVAL;
  442. break;
  443. }
  444. if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
  445. ret = -EAGAIN;
  446. break;
  447. }
  448. if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
  449. ffs->ev.count)) {
  450. ret = -EINTR;
  451. break;
  452. }
  453. /* unlocks spinlock */
  454. return __ffs_ep0_read_events(ffs, buf,
  455. min(n, (size_t)ffs->ev.count));
  456. case FFS_SETUP_PENDING:
  457. if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
  458. spin_unlock_irq(&ffs->ev.waitq.lock);
  459. ret = __ffs_ep0_stall(ffs);
  460. goto done_mutex;
  461. }
  462. len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
  463. spin_unlock_irq(&ffs->ev.waitq.lock);
  464. if (len) {
  465. data = kmalloc(len, GFP_KERNEL);
  466. if (!data) {
  467. ret = -ENOMEM;
  468. goto done_mutex;
  469. }
  470. }
  471. spin_lock_irq(&ffs->ev.waitq.lock);
  472. /* See ffs_ep0_write() */
  473. if (ffs_setup_state_clear_cancelled(ffs) ==
  474. FFS_SETUP_CANCELLED) {
  475. ret = -EIDRM;
  476. break;
  477. }
  478. /* unlocks spinlock */
  479. ret = __ffs_ep0_queue_wait(ffs, data, len);
  480. if ((ret > 0) && (copy_to_user(buf, data, len)))
  481. ret = -EFAULT;
  482. goto done_mutex;
  483. default:
  484. ret = -EBADFD;
  485. break;
  486. }
  487. spin_unlock_irq(&ffs->ev.waitq.lock);
  488. done_mutex:
  489. mutex_unlock(&ffs->mutex);
  490. kfree(data);
  491. return ret;
  492. }
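On the user-space side, the FFS_NO_SETUP branch above is what services a plain read(2) of events from ep0. A minimal sketch of such an event loop; ep0_consume_events is an illustrative helper and ep0_fd is assumed to be the descriptor left open after the descriptor/strings upload:

#include <unistd.h>
#include <linux/usb/functionfs.h>

static void ep0_consume_events(int ep0_fd)
{
	struct usb_functionfs_event ev[4];	/* the kernel batches at most four */
	ssize_t n = read(ep0_fd, ev, sizeof(ev));
	ssize_t i;

	for (i = 0; n > 0 && i < n / (ssize_t)sizeof(*ev); i++) {
		switch (ev[i].type) {
		case FUNCTIONFS_ENABLE:
			/* host configured us: start endpoint I/O */
			break;
		case FUNCTIONFS_DISABLE:
		case FUNCTIONFS_UNBIND:
			/* stop endpoint I/O */
			break;
		case FUNCTIONFS_SETUP:
			/* answer ev[i].u.setup with read()/write() on ep0 */
			break;
		default:
			break;
		}
	}
}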
  493. static int ffs_ep0_open(struct inode *inode, struct file *file)
  494. {
  495. struct ffs_data *ffs = inode->i_private;
  496. ENTER();
  497. if (ffs->state == FFS_CLOSING)
  498. return -EBUSY;
  499. file->private_data = ffs;
  500. ffs_data_opened(ffs);
  501. return stream_open(inode, file);
  502. }
  503. static int ffs_ep0_release(struct inode *inode, struct file *file)
  504. {
  505. struct ffs_data *ffs = file->private_data;
  506. ENTER();
  507. ffs_data_closed(ffs);
  508. return 0;
  509. }
  510. static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
  511. {
  512. struct ffs_data *ffs = file->private_data;
  513. struct usb_gadget *gadget = ffs->gadget;
  514. long ret;
  515. ENTER();
  516. if (code == FUNCTIONFS_INTERFACE_REVMAP) {
  517. struct ffs_function *func = ffs->func;
  518. ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
  519. } else if (gadget && gadget->ops->ioctl) {
  520. ret = gadget->ops->ioctl(gadget, code, value);
  521. } else {
  522. ret = -ENOTTY;
  523. }
  524. return ret;
  525. }
  526. static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
  527. {
  528. struct ffs_data *ffs = file->private_data;
  529. __poll_t mask = EPOLLWRNORM;
  530. int ret;
  531. poll_wait(file, &ffs->ev.waitq, wait);
  532. ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
  533. if (ret < 0)
  534. return mask;
  535. switch (ffs->state) {
  536. case FFS_READ_DESCRIPTORS:
  537. case FFS_READ_STRINGS:
  538. mask |= EPOLLOUT;
  539. break;
  540. case FFS_ACTIVE:
  541. switch (ffs->setup_state) {
  542. case FFS_NO_SETUP:
  543. if (ffs->ev.count)
  544. mask |= EPOLLIN;
  545. break;
  546. case FFS_SETUP_PENDING:
  547. case FFS_SETUP_CANCELLED:
  548. mask |= (EPOLLIN | EPOLLOUT);
  549. break;
  550. }
  551. break;
  552. case FFS_CLOSING:
  553. break;
  554. case FFS_DEACTIVATED:
  555. break;
  556. }
  557. mutex_unlock(&ffs->mutex);
  558. return mask;
  559. }
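The mask computed above maps onto ordinary poll(2)/epoll semantics on ep0: EPOLLIN means events (or a setup) can be read, EPOLLOUT that descriptors/strings or a setup response can be written. A small usage sketch with an illustrative helper:

#include <poll.h>

/* wait until ep0 has something to read; returns 0 on readiness, -1 otherwise */
static int ep0_wait_readable(int ep0_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = ep0_fd, .events = POLLIN };
	int r = poll(&pfd, 1, timeout_ms);

	if (r <= 0)
		return -1;
	return (pfd.revents & POLLIN) ? 0 : -1;
}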
  560. static const struct file_operations ffs_ep0_operations = {
  561. .llseek = no_llseek,
  562. .open = ffs_ep0_open,
  563. .write = ffs_ep0_write,
  564. .read = ffs_ep0_read,
  565. .release = ffs_ep0_release,
  566. .unlocked_ioctl = ffs_ep0_ioctl,
  567. .poll = ffs_ep0_poll,
  568. };
  569. /* "Normal" endpoints operations ********************************************/
  570. static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
  571. {
  572. struct ffs_io_data *io_data = req->context;
  573. ENTER();
  574. if (req->status)
  575. io_data->status = req->status;
  576. else
  577. io_data->status = req->actual;
  578. complete(&io_data->done);
  579. }
  580. static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
  581. {
  582. ssize_t ret = copy_to_iter(data, data_len, iter);
  583. if (ret == data_len)
  584. return ret;
  585. if (iov_iter_count(iter))
  586. return -EFAULT;
  587. /*
  588. * Dear user space developer!
  589. *
  590. * TL;DR: To stop getting the error message below in your kernel log, change
  591. * the user space code using functionfs to align read buffers to a max
  592. * packet size.
  593. *
  594. * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
  595. * packet size. When an unaligned buffer is passed to functionfs, it
  596. * internally uses a larger, aligned buffer so that such UDCs are happy.
  597. *
  598. * Unfortunately, this means that the host may send more data than was
  599. * requested in the read(2) system call. f_fs doesn’t know what to do with
  600. * that excess data, so it simply drops it.
  601. *
  602. * Had the buffer been aligned in the first place, no such problem
  603. * would have happened.
  604. *
  605. * Data may be dropped only in AIO reads. Synchronous reads are handled
  606. * by splitting a request into multiple parts. This splitting may still
  607. * be a problem though, so it's likely best to align the buffer
  608. * regardless of whether the read is AIO or not.
  609. *
  610. * This only affects OUT endpoints, i.e. reading data with a read(2),
  611. * aio_read(2) etc. system calls. Writing data to an IN endpoint is not
  612. * affected.
  613. */
  614. pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
  615. "Align read buffer size to max packet size to avoid the problem.\n",
  616. data_len, ret);
  617. return ret;
  618. }
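The fix this comment asks user space for is simply to round read sizes up to a multiple of the endpoint's wMaxPacketSize (which can be queried with the FUNCTIONFS_ENDPOINT_DESC ioctl handled later in this file). A hedged sketch with illustrative helpers, assuming e.g. a 512-byte high-speed bulk max packet:

#include <unistd.h>
#include <stdlib.h>
#include <string.h>

/* round a requested read length up to a multiple of the max packet size */
static size_t align_to_maxpacket(size_t want, size_t maxpacket)
{
	return (want + maxpacket - 1) / maxpacket * maxpacket;
}

/* read into dst through a maxpacket-aligned bounce buffer */
static ssize_t ep_out_read(int ep_fd, void *dst, size_t want, size_t maxpacket)
{
	size_t len = align_to_maxpacket(want, maxpacket); /* e.g. 1000 -> 1024 for 512 */
	char *tmp = malloc(len);
	ssize_t n;

	if (!tmp)
		return -1;
	n = read(ep_fd, tmp, len);	/* aligned size: nothing gets dropped */
	if (n > 0)
		memcpy(dst, tmp, (size_t)n < want ? (size_t)n : want);
	free(tmp);
	return n < 0 ? n : (ssize_t)((size_t)n < want ? (size_t)n : want);
}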
  619. /*
  620. * allocate a virtually contiguous buffer and create a scatterlist describing it
  621. * @sgt - pointer to a place to be filled with sg_table contents
  622. * @sz - required buffer size
  623. */
  624. static void *ffs_build_sg_list(struct sg_table *sgt, size_t sz)
  625. {
  626. struct page **pages;
  627. void *vaddr, *ptr;
  628. unsigned int n_pages;
  629. int i;
  630. vaddr = vmalloc(sz);
  631. if (!vaddr)
  632. return NULL;
  633. n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
  634. pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
  635. if (!pages) {
  636. vfree(vaddr);
  637. return NULL;
  638. }
  639. for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
  640. pages[i] = vmalloc_to_page(ptr);
  641. if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) {
  642. kvfree(pages);
  643. vfree(vaddr);
  644. return NULL;
  645. }
  646. kvfree(pages);
  647. return vaddr;
  648. }
  649. static inline void *ffs_alloc_buffer(struct ffs_io_data *io_data,
  650. size_t data_len)
  651. {
  652. if (io_data->use_sg)
  653. return ffs_build_sg_list(&io_data->sgt, data_len);
  654. return kmalloc(data_len, GFP_KERNEL);
  655. }
  656. static inline void ffs_free_buffer(struct ffs_io_data *io_data)
  657. {
  658. if (!io_data->buf)
  659. return;
  660. if (io_data->use_sg) {
  661. sg_free_table(&io_data->sgt);
  662. vfree(io_data->buf);
  663. } else {
  664. kfree(io_data->buf);
  665. }
  666. }
  667. static void ffs_user_copy_worker(struct work_struct *work)
  668. {
  669. struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
  670. work);
  671. int ret = io_data->req->status ? io_data->req->status :
  672. io_data->req->actual;
  673. bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
  674. if (io_data->read && ret > 0) {
  675. kthread_use_mm(io_data->mm);
  676. ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
  677. kthread_unuse_mm(io_data->mm);
  678. }
  679. io_data->kiocb->ki_complete(io_data->kiocb, ret);
  680. if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
  681. eventfd_signal(io_data->ffs->ffs_eventfd, 1);
  682. usb_ep_free_request(io_data->ep, io_data->req);
  683. if (io_data->read)
  684. kfree(io_data->to_free);
  685. ffs_free_buffer(io_data);
  686. kfree(io_data);
  687. }
  688. static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
  689. struct usb_request *req)
  690. {
  691. struct ffs_io_data *io_data = req->context;
  692. struct ffs_data *ffs = io_data->ffs;
  693. ENTER();
  694. INIT_WORK(&io_data->work, ffs_user_copy_worker);
  695. queue_work(ffs->io_completion_wq, &io_data->work);
  696. }
  697. static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
  698. {
  699. /*
  700. * See comment in struct ffs_epfile for full read_buffer pointer
  701. * synchronisation story.
  702. */
  703. struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
  704. if (buf && buf != READ_BUFFER_DROP)
  705. kfree(buf);
  706. }
  707. /* Assumes epfile->mutex is held. */
  708. static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
  709. struct iov_iter *iter)
  710. {
  711. /*
  712. * Null out epfile->read_buffer so ffs_func_eps_disable does not free
  713. * the buffer while we are using it. See comment in struct ffs_epfile
  714. * for full read_buffer pointer synchronisation story.
  715. */
  716. struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
  717. ssize_t ret;
  718. if (!buf || buf == READ_BUFFER_DROP)
  719. return 0;
  720. ret = copy_to_iter(buf->data, buf->length, iter);
  721. if (buf->length == ret) {
  722. kfree(buf);
  723. return ret;
  724. }
  725. if (iov_iter_count(iter)) {
  726. ret = -EFAULT;
  727. } else {
  728. buf->length -= ret;
  729. buf->data += ret;
  730. }
  731. if (cmpxchg(&epfile->read_buffer, NULL, buf))
  732. kfree(buf);
  733. return ret;
  734. }
  735. /* Assumes epfile->mutex is held. */
  736. static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
  737. void *data, int data_len,
  738. struct iov_iter *iter)
  739. {
  740. struct ffs_buffer *buf;
  741. ssize_t ret = copy_to_iter(data, data_len, iter);
  742. if (data_len == ret)
  743. return ret;
  744. if (iov_iter_count(iter))
  745. return -EFAULT;
  746. /* See ffs_copy_to_iter for more context. */
  747. pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
  748. data_len, ret);
  749. data_len -= ret;
  750. buf = kmalloc(struct_size(buf, storage, data_len), GFP_KERNEL);
  751. if (!buf)
  752. return -ENOMEM;
  753. buf->length = data_len;
  754. buf->data = buf->storage;
  755. memcpy(buf->storage, data + ret, flex_array_size(buf, storage, data_len));
  756. /*
  757. * At this point read_buffer is NULL or READ_BUFFER_DROP (if
  758. * ffs_func_eps_disable has been called in the meanwhile). See comment
  759. * in struct ffs_epfile for full read_buffer pointer synchronisation
  760. * story.
  761. */
  762. if (cmpxchg(&epfile->read_buffer, NULL, buf))
  763. kfree(buf);
  764. return ret;
  765. }
  766. static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
  767. {
  768. struct ffs_epfile *epfile = file->private_data;
  769. struct usb_request *req;
  770. struct ffs_ep *ep;
  771. char *data = NULL;
  772. ssize_t ret, data_len = -EINVAL;
  773. int halt;
  774. /* Are we still active? */
  775. if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
  776. return -ENODEV;
  777. /* Wait for endpoint to be enabled */
  778. ep = epfile->ep;
  779. if (!ep) {
  780. if (file->f_flags & O_NONBLOCK)
  781. return -EAGAIN;
  782. ret = wait_event_interruptible(
  783. epfile->ffs->wait, (ep = epfile->ep));
  784. if (ret)
  785. return -EINTR;
  786. }
  787. /* Do we halt? */
  788. halt = (!io_data->read == !epfile->in);
  789. if (halt && epfile->isoc)
  790. return -EINVAL;
  791. /* We will be using request and read_buffer */
  792. ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
  793. if (ret)
  794. goto error;
  795. /* Allocate & copy */
  796. if (!halt) {
  797. struct usb_gadget *gadget;
  798. /*
  799. * Do we have buffered data from a previous partial read? Check
  800. * for that in the synchronous case only, because we do not have
  801. * a facility to ‘wake up’ a pending asynchronous read and push
  802. * buffered data to it, which we would need to make things behave
  803. * consistently.
  804. */
  805. if (!io_data->aio && io_data->read) {
  806. ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
  807. if (ret)
  808. goto error_mutex;
  809. }
  810. /*
  811. * if we _do_ wait above, the epfile->ffs->gadget might be NULL
  812. * before the waiting completes, so do not assign to 'gadget'
  813. * earlier
  814. */
  815. gadget = epfile->ffs->gadget;
  816. spin_lock_irq(&epfile->ffs->eps_lock);
  817. /* In the meantime, endpoint got disabled or changed. */
  818. if (epfile->ep != ep) {
  819. ret = -ESHUTDOWN;
  820. goto error_lock;
  821. }
  822. data_len = iov_iter_count(&io_data->data);
  823. /*
  824. * Controller may require buffer size to be aligned to
  825. * maxpacketsize of an out endpoint.
  826. */
  827. if (io_data->read)
  828. data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
  829. io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
  830. spin_unlock_irq(&epfile->ffs->eps_lock);
  831. data = ffs_alloc_buffer(io_data, data_len);
  832. if (!data) {
  833. ret = -ENOMEM;
  834. goto error_mutex;
  835. }
  836. if (!io_data->read &&
  837. !copy_from_iter_full(data, data_len, &io_data->data)) {
  838. ret = -EFAULT;
  839. goto error_mutex;
  840. }
  841. }
  842. spin_lock_irq(&epfile->ffs->eps_lock);
  843. if (epfile->ep != ep) {
  844. /* In the meantime, endpoint got disabled or changed. */
  845. ret = -ESHUTDOWN;
  846. } else if (halt) {
  847. ret = usb_ep_set_halt(ep->ep);
  848. if (!ret)
  849. ret = -EBADMSG;
  850. } else if (data_len == -EINVAL) {
  851. /*
  852. * Sanity Check: even though data_len can't be used
  853. * uninitialized at the time I write this comment, some
  854. * compilers complain about this situation.
  855. * In order to keep the code clean from warnings, data_len is
  856. * being initialized to -EINVAL during its declaration, which
  857. * means we can no longer rely on the compiler to warn if future
  858. * changes result in data_len being used uninitialized.
  859. * For that reason, we're adding this redundant sanity check
  860. * here.
  861. */
  862. WARN(1, "%s: data_len == -EINVAL\n", __func__);
  863. ret = -EINVAL;
  864. } else if (!io_data->aio) {
  865. bool interrupted = false;
  866. req = ep->req;
  867. if (io_data->use_sg) {
  868. req->buf = NULL;
  869. req->sg = io_data->sgt.sgl;
  870. req->num_sgs = io_data->sgt.nents;
  871. } else {
  872. req->buf = data;
  873. req->num_sgs = 0;
  874. }
  875. req->length = data_len;
  876. io_data->buf = data;
  877. init_completion(&io_data->done);
  878. req->context = io_data;
  879. req->complete = ffs_epfile_io_complete;
  880. ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
  881. if (ret < 0)
  882. goto error_lock;
  883. spin_unlock_irq(&epfile->ffs->eps_lock);
  884. if (wait_for_completion_interruptible(&io_data->done)) {
  885. spin_lock_irq(&epfile->ffs->eps_lock);
  886. if (epfile->ep != ep) {
  887. ret = -ESHUTDOWN;
  888. goto error_lock;
  889. }
  890. /*
  891. * To avoid a race condition with ffs_epfile_io_complete,
  892. * dequeue the request first and then check its
  893. * status. The usb_ep_dequeue API should guarantee no race
  894. * condition with the req->complete callback.
  895. */
  896. usb_ep_dequeue(ep->ep, req);
  897. spin_unlock_irq(&epfile->ffs->eps_lock);
  898. wait_for_completion(&io_data->done);
  899. interrupted = io_data->status < 0;
  900. }
  901. if (interrupted)
  902. ret = -EINTR;
  903. else if (io_data->read && io_data->status > 0)
  904. ret = __ffs_epfile_read_data(epfile, data, io_data->status,
  905. &io_data->data);
  906. else
  907. ret = io_data->status;
  908. goto error_mutex;
  909. } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
  910. ret = -ENOMEM;
  911. } else {
  912. if (io_data->use_sg) {
  913. req->buf = NULL;
  914. req->sg = io_data->sgt.sgl;
  915. req->num_sgs = io_data->sgt.nents;
  916. } else {
  917. req->buf = data;
  918. req->num_sgs = 0;
  919. }
  920. req->length = data_len;
  921. io_data->buf = data;
  922. io_data->ep = ep->ep;
  923. io_data->req = req;
  924. io_data->ffs = epfile->ffs;
  925. req->context = io_data;
  926. req->complete = ffs_epfile_async_io_complete;
  927. ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
  928. if (ret) {
  929. io_data->req = NULL;
  930. usb_ep_free_request(ep->ep, req);
  931. goto error_lock;
  932. }
  933. ret = -EIOCBQUEUED;
  934. /*
  935. * Do not kfree the buffer in this function. It will be freed
  936. * by ffs_user_copy_worker.
  937. */
  938. data = NULL;
  939. }
  940. error_lock:
  941. spin_unlock_irq(&epfile->ffs->eps_lock);
  942. error_mutex:
  943. mutex_unlock(&epfile->mutex);
  944. error:
  945. if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
  946. ffs_free_buffer(io_data);
  947. return ret;
  948. }
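Seen from user space, the synchronous path of ffs_epfile_io above is just blocking read(2)/write(2) on the endpoint files created next to ep0 (ep1, ep2, … per the uploaded descriptors). A minimal sketch; the mount point, endpoint numbering and the helper name are examples only:

#include <fcntl.h>
#include <unistd.h>

static int ffs_sync_io_demo(void)
{
	/* example paths; actual names depend on the mount point and descriptors */
	int in_fd  = open("/dev/usb-ffs/myfunc/ep1", O_RDWR); /* IN:  device -> host */
	int out_fd = open("/dev/usb-ffs/myfunc/ep2", O_RDWR); /* OUT: host -> device */
	const char tx[] = "hello host";
	char rx[4096];
	int ret = -1;

	if (in_fd < 0 || out_fd < 0)
		goto out;
	/* blocks until the host issues the matching IN transfer */
	if (write(in_fd, tx, sizeof(tx)) < 0)
		goto out;
	/* blocks until the host sends data; 4096 stays maxpacket-aligned */
	if (read(out_fd, rx, sizeof(rx)) < 0)
		goto out;
	ret = 0;
out:
	if (in_fd >= 0)
		close(in_fd);
	if (out_fd >= 0)
		close(out_fd);
	return ret;
}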
  949. static int
  950. ffs_epfile_open(struct inode *inode, struct file *file)
  951. {
  952. struct ffs_epfile *epfile = inode->i_private;
  953. ENTER();
  954. if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
  955. return -ENODEV;
  956. file->private_data = epfile;
  957. ffs_data_opened(epfile->ffs);
  958. return stream_open(inode, file);
  959. }
  960. static int ffs_aio_cancel(struct kiocb *kiocb)
  961. {
  962. struct ffs_io_data *io_data = kiocb->private;
  963. struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
  964. unsigned long flags;
  965. int value;
  966. ENTER();
  967. spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
  968. if (io_data && io_data->ep && io_data->req)
  969. value = usb_ep_dequeue(io_data->ep, io_data->req);
  970. else
  971. value = -EINVAL;
  972. spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
  973. return value;
  974. }
  975. static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
  976. {
  977. struct ffs_io_data io_data, *p = &io_data;
  978. ssize_t res;
  979. ENTER();
  980. if (!is_sync_kiocb(kiocb)) {
  981. p = kzalloc(sizeof(io_data), GFP_KERNEL);
  982. if (!p)
  983. return -ENOMEM;
  984. p->aio = true;
  985. } else {
  986. memset(p, 0, sizeof(*p));
  987. p->aio = false;
  988. }
  989. p->read = false;
  990. p->kiocb = kiocb;
  991. p->data = *from;
  992. p->mm = current->mm;
  993. kiocb->private = p;
  994. if (p->aio)
  995. kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
  996. res = ffs_epfile_io(kiocb->ki_filp, p);
  997. if (res == -EIOCBQUEUED)
  998. return res;
  999. if (p->aio)
  1000. kfree(p);
  1001. else
  1002. *from = p->data;
  1003. return res;
  1004. }
  1005. static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
  1006. {
  1007. struct ffs_io_data io_data, *p = &io_data;
  1008. ssize_t res;
  1009. ENTER();
  1010. if (!is_sync_kiocb(kiocb)) {
  1011. p = kzalloc(sizeof(io_data), GFP_KERNEL);
  1012. if (!p)
  1013. return -ENOMEM;
  1014. p->aio = true;
  1015. } else {
  1016. memset(p, 0, sizeof(*p));
  1017. p->aio = false;
  1018. }
  1019. p->read = true;
  1020. p->kiocb = kiocb;
  1021. if (p->aio) {
  1022. p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
  1023. if (!p->to_free) {
  1024. kfree(p);
  1025. return -ENOMEM;
  1026. }
  1027. } else {
  1028. p->data = *to;
  1029. p->to_free = NULL;
  1030. }
  1031. p->mm = current->mm;
  1032. kiocb->private = p;
  1033. if (p->aio)
  1034. kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
  1035. res = ffs_epfile_io(kiocb->ki_filp, p);
  1036. if (res == -EIOCBQUEUED)
  1037. return res;
  1038. if (p->aio) {
  1039. kfree(p->to_free);
  1040. kfree(p);
  1041. } else {
  1042. *to = p->data;
  1043. }
  1044. return res;
  1045. }
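The asynchronous branch (p->aio) in the two *_iter functions above is normally driven from user space with the Linux AIO interface; the request then reaches ffs_epfile_io with io_data->aio set and completes through ffs_user_copy_worker. A rough sketch using libaio plus an eventfd for completion notification (link with -laio; the helper name and buffer are placeholders):

#include <libaio.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>

static int ffs_aio_read_demo(int ep_out_fd, void *buf, size_t len)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event done;
	int efd = eventfd(0, 0);
	uint64_t tick;

	if (efd < 0 || io_setup(1, &ctx) < 0)
		return -1;
	io_prep_pread(&cb, ep_out_fd, buf, len, 0);	/* offset is ignored on ep files */
	io_set_eventfd(&cb, efd);			/* completion also signals efd */
	if (io_submit(ctx, 1, cbs) != 1)
		return -1;
	read(efd, &tick, sizeof(tick));			/* wait for the completion signal */
	io_getevents(ctx, 1, 1, &done, NULL);		/* reap: done.res is bytes read */
	io_destroy(ctx);
	close(efd);
	return (int)done.res;
}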
  1046. static int
  1047. ffs_epfile_release(struct inode *inode, struct file *file)
  1048. {
  1049. struct ffs_epfile *epfile = inode->i_private;
  1050. ENTER();
  1051. __ffs_epfile_read_buffer_free(epfile);
  1052. ffs_data_closed(epfile->ffs);
  1053. return 0;
  1054. }
  1055. static long ffs_epfile_ioctl(struct file *file, unsigned code,
  1056. unsigned long value)
  1057. {
  1058. struct ffs_epfile *epfile = file->private_data;
  1059. struct ffs_ep *ep;
  1060. int ret;
  1061. ENTER();
  1062. if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
  1063. return -ENODEV;
  1064. /* Wait for endpoint to be enabled */
  1065. ep = epfile->ep;
  1066. if (!ep) {
  1067. if (file->f_flags & O_NONBLOCK)
  1068. return -EAGAIN;
  1069. ret = wait_event_interruptible(
  1070. epfile->ffs->wait, (ep = epfile->ep));
  1071. if (ret)
  1072. return -EINTR;
  1073. }
  1074. spin_lock_irq(&epfile->ffs->eps_lock);
  1075. /* In the meantime, endpoint got disabled or changed. */
  1076. if (epfile->ep != ep) {
  1077. spin_unlock_irq(&epfile->ffs->eps_lock);
  1078. return -ESHUTDOWN;
  1079. }
  1080. switch (code) {
  1081. case FUNCTIONFS_FIFO_STATUS:
  1082. ret = usb_ep_fifo_status(epfile->ep->ep);
  1083. break;
  1084. case FUNCTIONFS_FIFO_FLUSH:
  1085. usb_ep_fifo_flush(epfile->ep->ep);
  1086. ret = 0;
  1087. break;
  1088. case FUNCTIONFS_CLEAR_HALT:
  1089. ret = usb_ep_clear_halt(epfile->ep->ep);
  1090. break;
  1091. case FUNCTIONFS_ENDPOINT_REVMAP:
  1092. ret = epfile->ep->num;
  1093. break;
  1094. case FUNCTIONFS_ENDPOINT_DESC:
  1095. {
  1096. int desc_idx;
  1097. struct usb_endpoint_descriptor desc1, *desc;
  1098. switch (epfile->ffs->gadget->speed) {
  1099. case USB_SPEED_SUPER:
  1100. case USB_SPEED_SUPER_PLUS:
  1101. desc_idx = 2;
  1102. break;
  1103. case USB_SPEED_HIGH:
  1104. desc_idx = 1;
  1105. break;
  1106. default:
  1107. desc_idx = 0;
  1108. }
  1109. desc = epfile->ep->descs[desc_idx];
  1110. memcpy(&desc1, desc, desc->bLength);
  1111. spin_unlock_irq(&epfile->ffs->eps_lock);
  1112. ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
  1113. if (ret)
  1114. ret = -EFAULT;
  1115. return ret;
  1116. }
  1117. default:
  1118. ret = -ENOTTY;
  1119. }
  1120. spin_unlock_irq(&epfile->ffs->eps_lock);
  1121. return ret;
  1122. }
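User space can use these ioctls to query endpoint properties at the currently negotiated speed; FUNCTIONFS_ENDPOINT_DESC, for instance, is a convenient way to learn wMaxPacketSize for the read alignment discussed earlier. A small sketch with an illustrative helper (error handling trimmed):

#include <sys/ioctl.h>
#include <endian.h>
#include <linux/usb/functionfs.h>
#include <linux/usb/ch9.h>

/* returns the max packet size of the endpoint behind ep_fd, or 0 on error */
static unsigned int ep_max_packet(int ep_fd)
{
	struct usb_endpoint_descriptor desc;

	if (ioctl(ep_fd, FUNCTIONFS_ENDPOINT_DESC, &desc) < 0)
		return 0;
	return le16toh(desc.wMaxPacketSize);
}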
  1123. static const struct file_operations ffs_epfile_operations = {
  1124. .llseek = no_llseek,
  1125. .open = ffs_epfile_open,
  1126. .write_iter = ffs_epfile_write_iter,
  1127. .read_iter = ffs_epfile_read_iter,
  1128. .release = ffs_epfile_release,
  1129. .unlocked_ioctl = ffs_epfile_ioctl,
  1130. .compat_ioctl = compat_ptr_ioctl,
  1131. };
  1132. /* File system and super block operations ***********************************/
  1133. /*
  1134. * Mounting the file system creates a controller file, used first for
  1135. * function configuration then later for event monitoring.
  1136. */
  1137. static struct inode *__must_check
  1138. ffs_sb_make_inode(struct super_block *sb, void *data,
  1139. const struct file_operations *fops,
  1140. const struct inode_operations *iops,
  1141. struct ffs_file_perms *perms)
  1142. {
  1143. struct inode *inode;
  1144. ENTER();
  1145. inode = new_inode(sb);
  1146. if (inode) {
  1147. struct timespec64 ts = current_time(inode);
  1148. inode->i_ino = get_next_ino();
  1149. inode->i_mode = perms->mode;
  1150. inode->i_uid = perms->uid;
  1151. inode->i_gid = perms->gid;
  1152. inode->i_atime = ts;
  1153. inode->i_mtime = ts;
  1154. inode->i_ctime = ts;
  1155. inode->i_private = data;
  1156. if (fops)
  1157. inode->i_fop = fops;
  1158. if (iops)
  1159. inode->i_op = iops;
  1160. }
  1161. return inode;
  1162. }
  1163. /* Create "regular" file */
  1164. static struct dentry *ffs_sb_create_file(struct super_block *sb,
  1165. const char *name, void *data,
  1166. const struct file_operations *fops)
  1167. {
  1168. struct ffs_data *ffs = sb->s_fs_info;
  1169. struct dentry *dentry;
  1170. struct inode *inode;
  1171. ENTER();
  1172. dentry = d_alloc_name(sb->s_root, name);
  1173. if (!dentry)
  1174. return NULL;
  1175. inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
  1176. if (!inode) {
  1177. dput(dentry);
  1178. return NULL;
  1179. }
  1180. d_add(dentry, inode);
  1181. return dentry;
  1182. }
  1183. /* Super block */
  1184. static const struct super_operations ffs_sb_operations = {
  1185. .statfs = simple_statfs,
  1186. .drop_inode = generic_delete_inode,
  1187. };
  1188. struct ffs_sb_fill_data {
  1189. struct ffs_file_perms perms;
  1190. umode_t root_mode;
  1191. const char *dev_name;
  1192. bool no_disconnect;
  1193. struct ffs_data *ffs_data;
  1194. };
  1195. static int ffs_sb_fill(struct super_block *sb, struct fs_context *fc)
  1196. {
  1197. struct ffs_sb_fill_data *data = fc->fs_private;
  1198. struct inode *inode;
  1199. struct ffs_data *ffs = data->ffs_data;
  1200. ENTER();
  1201. ffs->sb = sb;
  1202. data->ffs_data = NULL;
  1203. sb->s_fs_info = ffs;
  1204. sb->s_blocksize = PAGE_SIZE;
  1205. sb->s_blocksize_bits = PAGE_SHIFT;
  1206. sb->s_magic = FUNCTIONFS_MAGIC;
  1207. sb->s_op = &ffs_sb_operations;
  1208. sb->s_time_gran = 1;
  1209. /* Root inode */
  1210. data->perms.mode = data->root_mode;
  1211. inode = ffs_sb_make_inode(sb, NULL,
  1212. &simple_dir_operations,
  1213. &simple_dir_inode_operations,
  1214. &data->perms);
  1215. sb->s_root = d_make_root(inode);
  1216. if (!sb->s_root)
  1217. return -ENOMEM;
  1218. /* EP0 file */
  1219. if (!ffs_sb_create_file(sb, "ep0", ffs, &ffs_ep0_operations))
  1220. return -ENOMEM;
  1221. return 0;
  1222. }
  1223. enum {
  1224. Opt_no_disconnect,
  1225. Opt_rmode,
  1226. Opt_fmode,
  1227. Opt_mode,
  1228. Opt_uid,
  1229. Opt_gid,
  1230. };
  1231. static const struct fs_parameter_spec ffs_fs_fs_parameters[] = {
  1232. fsparam_bool ("no_disconnect", Opt_no_disconnect),
  1233. fsparam_u32 ("rmode", Opt_rmode),
  1234. fsparam_u32 ("fmode", Opt_fmode),
  1235. fsparam_u32 ("mode", Opt_mode),
  1236. fsparam_u32 ("uid", Opt_uid),
  1237. fsparam_u32 ("gid", Opt_gid),
  1238. {}
  1239. };
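/*
 * These correspond to mount options, e.g. (illustrative values only):
 *
 *   mount -t functionfs func0 /dev/ffs \
 *         -o uid=1000,gid=1000,rmode=0550,fmode=0660,no_disconnect=1
 *
 * ffs_fs_parse_param() below is invoked once per option.
 */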
  1240. static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
  1241. {
  1242. struct ffs_sb_fill_data *data = fc->fs_private;
  1243. struct fs_parse_result result;
  1244. int opt;
  1245. ENTER();
  1246. opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result);
  1247. if (opt < 0)
  1248. return opt;
  1249. switch (opt) {
  1250. case Opt_no_disconnect:
  1251. data->no_disconnect = result.boolean;
  1252. break;
  1253. case Opt_rmode:
  1254. data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
  1255. break;
  1256. case Opt_fmode:
  1257. data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
  1258. break;
  1259. case Opt_mode:
  1260. data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
  1261. data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
  1262. break;
  1263. case Opt_uid:
  1264. data->perms.uid = make_kuid(current_user_ns(), result.uint_32);
  1265. if (!uid_valid(data->perms.uid))
  1266. goto unmapped_value;
  1267. break;
  1268. case Opt_gid:
  1269. data->perms.gid = make_kgid(current_user_ns(), result.uint_32);
  1270. if (!gid_valid(data->perms.gid))
  1271. goto unmapped_value;
  1272. break;
  1273. default:
  1274. return -ENOPARAM;
  1275. }
  1276. return 0;
  1277. unmapped_value:
  1278. return invalf(fc, "%s: unmapped value: %u", param->key, result.uint_32);
  1279. }
  1280. /*
  1281. * Set up the superblock for a mount.
  1282. */
  1283. static int ffs_fs_get_tree(struct fs_context *fc)
  1284. {
  1285. struct ffs_sb_fill_data *ctx = fc->fs_private;
  1286. struct ffs_data *ffs;
  1287. int ret;
  1288. ENTER();
  1289. if (!fc->source)
  1290. return invalf(fc, "No source specified");
  1291. ffs = ffs_data_new(fc->source);
  1292. if (!ffs)
  1293. return -ENOMEM;
  1294. ffs->file_perms = ctx->perms;
  1295. ffs->no_disconnect = ctx->no_disconnect;
  1296. ffs->dev_name = kstrdup(fc->source, GFP_KERNEL);
  1297. if (!ffs->dev_name) {
  1298. ffs_data_put(ffs);
  1299. return -ENOMEM;
  1300. }
  1301. ret = ffs_acquire_dev(ffs->dev_name, ffs);
  1302. if (ret) {
  1303. ffs_data_put(ffs);
  1304. return ret;
  1305. }
  1306. ctx->ffs_data = ffs;
  1307. return get_tree_nodev(fc, ffs_sb_fill);
  1308. }
  1309. static void ffs_fs_free_fc(struct fs_context *fc)
  1310. {
  1311. struct ffs_sb_fill_data *ctx = fc->fs_private;
  1312. if (ctx) {
  1313. if (ctx->ffs_data) {
  1314. ffs_data_put(ctx->ffs_data);
  1315. }
  1316. kfree(ctx);
  1317. }
  1318. }
  1319. static const struct fs_context_operations ffs_fs_context_ops = {
  1320. .free = ffs_fs_free_fc,
  1321. .parse_param = ffs_fs_parse_param,
  1322. .get_tree = ffs_fs_get_tree,
  1323. };
  1324. static int ffs_fs_init_fs_context(struct fs_context *fc)
  1325. {
  1326. struct ffs_sb_fill_data *ctx;
  1327. ctx = kzalloc(sizeof(struct ffs_sb_fill_data), GFP_KERNEL);
  1328. if (!ctx)
  1329. return -ENOMEM;
  1330. ctx->perms.mode = S_IFREG | 0600;
  1331. ctx->perms.uid = GLOBAL_ROOT_UID;
  1332. ctx->perms.gid = GLOBAL_ROOT_GID;
  1333. ctx->root_mode = S_IFDIR | 0500;
  1334. ctx->no_disconnect = false;
  1335. fc->fs_private = ctx;
  1336. fc->ops = &ffs_fs_context_ops;
  1337. return 0;
  1338. }
  1339. static void
  1340. ffs_fs_kill_sb(struct super_block *sb)
  1341. {
  1342. ENTER();
  1343. kill_litter_super(sb);
  1344. if (sb->s_fs_info)
  1345. ffs_data_closed(sb->s_fs_info);
  1346. }
  1347. static struct file_system_type ffs_fs_type = {
  1348. .owner = THIS_MODULE,
  1349. .name = "functionfs",
  1350. .init_fs_context = ffs_fs_init_fs_context,
  1351. .parameters = ffs_fs_fs_parameters,
  1352. .kill_sb = ffs_fs_kill_sb,
  1353. };
  1354. MODULE_ALIAS_FS("functionfs");
  1355. /* Driver's main init/cleanup functions *************************************/
  1356. static int functionfs_init(void)
  1357. {
  1358. int ret;
  1359. ENTER();
  1360. ret = register_filesystem(&ffs_fs_type);
  1361. if (!ret)
  1362. pr_info("file system registered\n");
  1363. else
  1364. pr_err("failed registering file system (%d)\n", ret);
  1365. return ret;
  1366. }
  1367. static void functionfs_cleanup(void)
  1368. {
  1369. ENTER();
  1370. pr_info("unloading\n");
  1371. unregister_filesystem(&ffs_fs_type);
  1372. }
  1373. /* ffs_data and ffs_function construction and destruction code **************/
  1374. static void ffs_data_clear(struct ffs_data *ffs);
  1375. static void ffs_data_reset(struct ffs_data *ffs);
  1376. static void ffs_data_get(struct ffs_data *ffs)
  1377. {
  1378. ENTER();
  1379. refcount_inc(&ffs->ref);
  1380. }
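/*
 * Take a reference and account for a new opener.  If this is the first
 * opener and the instance was left in FFS_DEACTIVATED (no_disconnect
 * mode), reset it so descriptors can be written from scratch.
 */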
  1381. static void ffs_data_opened(struct ffs_data *ffs)
  1382. {
  1383. ENTER();
  1384. refcount_inc(&ffs->ref);
  1385. if (atomic_add_return(1, &ffs->opened) == 1 &&
  1386. ffs->state == FFS_DEACTIVATED) {
  1387. ffs->state = FFS_CLOSING;
  1388. ffs_data_reset(ffs);
  1389. }
  1390. }
  1391. static void ffs_data_put(struct ffs_data *ffs)
  1392. {
  1393. ENTER();
  1394. if (refcount_dec_and_test(&ffs->ref)) {
  1395. pr_info("%s(): freeing\n", __func__);
  1396. ffs_data_clear(ffs);
  1397. ffs_release_dev(ffs->private_data);
  1398. BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
  1399. swait_active(&ffs->ep0req_completion.wait) ||
  1400. waitqueue_active(&ffs->wait));
  1401. destroy_workqueue(ffs->io_completion_wq);
  1402. kfree(ffs->dev_name);
  1403. kfree(ffs);
  1404. }
  1405. }
  1406. static void ffs_data_closed(struct ffs_data *ffs)
  1407. {
  1408. struct ffs_epfile *epfiles;
  1409. unsigned long flags;
  1410. ENTER();
  1411. if (atomic_dec_and_test(&ffs->opened)) {
  1412. if (ffs->no_disconnect) {
  1413. ffs->state = FFS_DEACTIVATED;
  1414. spin_lock_irqsave(&ffs->eps_lock, flags);
  1415. epfiles = ffs->epfiles;
  1416. ffs->epfiles = NULL;
  1417. spin_unlock_irqrestore(&ffs->eps_lock,
  1418. flags);
  1419. if (epfiles)
  1420. ffs_epfiles_destroy(epfiles,
  1421. ffs->eps_count);
  1422. if (ffs->setup_state == FFS_SETUP_PENDING)
  1423. __ffs_ep0_stall(ffs);
  1424. } else {
  1425. ffs->state = FFS_CLOSING;
  1426. ffs_data_reset(ffs);
  1427. }
  1428. }
  1429. if (atomic_read(&ffs->opened) < 0) {
  1430. ffs->state = FFS_CLOSING;
  1431. ffs_data_reset(ffs);
  1432. }
  1433. ffs_data_put(ffs);
  1434. }
  1435. static struct ffs_data *ffs_data_new(const char *dev_name)
  1436. {
  1437. struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
  1438. if (!ffs)
  1439. return NULL;
  1440. ENTER();
  1441. ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
  1442. if (!ffs->io_completion_wq) {
  1443. kfree(ffs);
  1444. return NULL;
  1445. }
  1446. refcount_set(&ffs->ref, 1);
  1447. atomic_set(&ffs->opened, 0);
  1448. ffs->state = FFS_READ_DESCRIPTORS;
  1449. mutex_init(&ffs->mutex);
  1450. spin_lock_init(&ffs->eps_lock);
  1451. init_waitqueue_head(&ffs->ev.waitq);
  1452. init_waitqueue_head(&ffs->wait);
  1453. init_completion(&ffs->ep0req_completion);
  1454. /* XXX REVISIT need to update it in some places, or do we? */
  1455. ffs->ev.can_stall = 1;
  1456. return ffs;
  1457. }
  1458. static void ffs_data_clear(struct ffs_data *ffs)
  1459. {
  1460. struct ffs_epfile *epfiles;
  1461. unsigned long flags;
  1462. ENTER();
  1463. ffs_closed(ffs);
  1464. BUG_ON(ffs->gadget);
  1465. spin_lock_irqsave(&ffs->eps_lock, flags);
  1466. epfiles = ffs->epfiles;
  1467. ffs->epfiles = NULL;
  1468. spin_unlock_irqrestore(&ffs->eps_lock, flags);
1469. /*
1470. * A race is possible between ffs_func_eps_disable
1471. * and ffs_epfile_release, therefore maintaining a local
1472. * copy of epfiles saves us from a use-after-free.
1473. */
  1474. if (epfiles) {
  1475. ffs_epfiles_destroy(epfiles, ffs->eps_count);
  1476. ffs->epfiles = NULL;
  1477. }
  1478. if (ffs->ffs_eventfd) {
  1479. eventfd_ctx_put(ffs->ffs_eventfd);
  1480. ffs->ffs_eventfd = NULL;
  1481. }
  1482. kfree(ffs->raw_descs_data);
  1483. kfree(ffs->raw_strings);
  1484. kfree(ffs->stringtabs);
  1485. }
  1486. static void ffs_data_reset(struct ffs_data *ffs)
  1487. {
  1488. ENTER();
  1489. ffs_data_clear(ffs);
  1490. ffs->raw_descs_data = NULL;
  1491. ffs->raw_descs = NULL;
  1492. ffs->raw_strings = NULL;
  1493. ffs->stringtabs = NULL;
  1494. ffs->raw_descs_length = 0;
  1495. ffs->fs_descs_count = 0;
  1496. ffs->hs_descs_count = 0;
  1497. ffs->ss_descs_count = 0;
  1498. ffs->strings_count = 0;
  1499. ffs->interfaces_count = 0;
  1500. ffs->eps_count = 0;
  1501. ffs->ev.count = 0;
  1502. ffs->state = FFS_READ_DESCRIPTORS;
  1503. ffs->setup_state = FFS_NO_SETUP;
  1504. ffs->flags = 0;
  1505. ffs->ms_os_descs_ext_prop_count = 0;
  1506. ffs->ms_os_descs_ext_prop_name_len = 0;
  1507. ffs->ms_os_descs_ext_prop_data_len = 0;
  1508. }
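/*
 * Bind the ffs instance to a composite device: reserve USB string IDs for
 * the strings the user supplied, allocate the ep0 request and fill the
 * allocated IDs into the string tables.  Takes a reference on ffs which
 * is dropped again in functionfs_unbind().
 */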
  1509. static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
  1510. {
  1511. struct usb_gadget_strings **lang;
  1512. int first_id;
  1513. ENTER();
  1514. if (WARN_ON(ffs->state != FFS_ACTIVE
  1515. || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
  1516. return -EBADFD;
  1517. first_id = usb_string_ids_n(cdev, ffs->strings_count);
  1518. if (first_id < 0)
  1519. return first_id;
  1520. ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
  1521. if (!ffs->ep0req)
  1522. return -ENOMEM;
  1523. ffs->ep0req->complete = ffs_ep0_complete;
  1524. ffs->ep0req->context = ffs;
  1525. lang = ffs->stringtabs;
  1526. if (lang) {
  1527. for (; *lang; ++lang) {
  1528. struct usb_string *str = (*lang)->strings;
  1529. int id = first_id;
  1530. for (; str->s; ++id, ++str)
  1531. str->id = id;
  1532. }
  1533. }
  1534. ffs->gadget = cdev->gadget;
  1535. ffs_data_get(ffs);
  1536. return 0;
  1537. }
  1538. static void functionfs_unbind(struct ffs_data *ffs)
  1539. {
  1540. ENTER();
  1541. if (!WARN_ON(!ffs->gadget)) {
  1542. /* dequeue before freeing ep0req */
  1543. usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
  1544. mutex_lock(&ffs->mutex);
  1545. usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
  1546. ffs->ep0req = NULL;
  1547. ffs->gadget = NULL;
  1548. clear_bit(FFS_FL_BOUND, &ffs->flags);
  1549. mutex_unlock(&ffs->mutex);
  1550. ffs_data_put(ffs);
  1551. }
  1552. }
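/*
 * Create one endpoint file per endpoint declared in the descriptors.
 * With FUNCTIONFS_VIRTUAL_ADDR the files are named after the endpoint
 * addresses from the descriptors ("ep%02x"), otherwise they are simply
 * numbered ep1..epN.  On failure everything created so far is torn down.
 */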
  1553. static int ffs_epfiles_create(struct ffs_data *ffs)
  1554. {
  1555. struct ffs_epfile *epfile, *epfiles;
  1556. unsigned i, count;
  1557. ENTER();
  1558. count = ffs->eps_count;
  1559. epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
  1560. if (!epfiles)
  1561. return -ENOMEM;
  1562. epfile = epfiles;
  1563. for (i = 1; i <= count; ++i, ++epfile) {
  1564. epfile->ffs = ffs;
  1565. mutex_init(&epfile->mutex);
  1566. if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
  1567. sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
  1568. else
  1569. sprintf(epfile->name, "ep%u", i);
  1570. epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
  1571. epfile,
  1572. &ffs_epfile_operations);
  1573. if (!epfile->dentry) {
  1574. ffs_epfiles_destroy(epfiles, i - 1);
  1575. return -ENOMEM;
  1576. }
  1577. }
  1578. ffs->epfiles = epfiles;
  1579. return 0;
  1580. }
  1581. static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
  1582. {
  1583. struct ffs_epfile *epfile = epfiles;
  1584. ENTER();
  1585. for (; count; --count, ++epfile) {
  1586. BUG_ON(mutex_is_locked(&epfile->mutex));
  1587. if (epfile->dentry) {
  1588. d_delete(epfile->dentry);
  1589. dput(epfile->dentry);
  1590. epfile->dentry = NULL;
  1591. }
  1592. }
  1593. kfree(epfiles);
  1594. }
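/*
 * Disable every endpoint of the function (nuking any pending requests)
 * and detach the endpoints from their epfiles, freeing any cached read
 * buffer.  Runs under ffs->eps_lock.
 */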
  1595. static void ffs_func_eps_disable(struct ffs_function *func)
  1596. {
  1597. struct ffs_ep *ep;
  1598. struct ffs_epfile *epfile;
  1599. unsigned short count;
  1600. unsigned long flags;
  1601. spin_lock_irqsave(&func->ffs->eps_lock, flags);
  1602. count = func->ffs->eps_count;
  1603. epfile = func->ffs->epfiles;
  1604. ep = func->eps;
  1605. while (count--) {
  1606. /* pending requests get nuked */
  1607. if (ep->ep)
  1608. usb_ep_disable(ep->ep);
  1609. ++ep;
  1610. if (epfile) {
  1611. epfile->ep = NULL;
  1612. __ffs_epfile_read_buffer_free(epfile);
  1613. ++epfile;
  1614. }
  1615. }
  1616. spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
  1617. }
  1618. static int ffs_func_eps_enable(struct ffs_function *func)
  1619. {
  1620. struct ffs_data *ffs;
  1621. struct ffs_ep *ep;
  1622. struct ffs_epfile *epfile;
  1623. unsigned short count;
  1624. unsigned long flags;
  1625. int ret = 0;
  1626. spin_lock_irqsave(&func->ffs->eps_lock, flags);
  1627. ffs = func->ffs;
  1628. ep = func->eps;
  1629. epfile = ffs->epfiles;
  1630. count = ffs->eps_count;
  1631. while(count--) {
  1632. ep->ep->driver_data = ep;
  1633. ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
  1634. if (ret) {
  1635. pr_err("%s: config_ep_by_speed(%s) returned %d\n",
  1636. __func__, ep->ep->name, ret);
  1637. break;
  1638. }
  1639. ret = usb_ep_enable(ep->ep);
  1640. if (!ret) {
  1641. epfile->ep = ep;
  1642. epfile->in = usb_endpoint_dir_in(ep->ep->desc);
  1643. epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
  1644. } else {
  1645. break;
  1646. }
  1647. ++ep;
  1648. ++epfile;
  1649. }
  1650. wake_up_interruptible(&ffs->wait);
  1651. spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
  1652. return ret;
  1653. }
  1654. /* Parsing and building descriptors and strings *****************************/
1655. /*
1656. * This validates that the data pointed to by data is a valid USB
1657. * descriptor and records how many interfaces, endpoints and strings
1658. * are required by the given configuration. The parser returns the
1659. * number of bytes consumed, or a negative error code if the data is invalid.
1660. */
  1661. enum ffs_entity_type {
  1662. FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
  1663. };
  1664. enum ffs_os_desc_type {
  1665. FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
  1666. };
  1667. typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
  1668. u8 *valuep,
  1669. struct usb_descriptor_header *desc,
  1670. void *priv);
  1671. typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
  1672. struct usb_os_desc_header *h, void *data,
  1673. unsigned len, void *priv);
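/*
 * Parse a single descriptor.  On success the descriptor's length (the
 * number of bytes consumed) is returned; on any validation failure a
 * negative error code is returned instead.
 */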
  1674. static int __must_check ffs_do_single_desc(char *data, unsigned len,
  1675. ffs_entity_callback entity,
  1676. void *priv, int *current_class)
  1677. {
  1678. struct usb_descriptor_header *_ds = (void *)data;
  1679. u8 length;
  1680. int ret;
  1681. ENTER();
  1682. /* At least two bytes are required: length and type */
  1683. if (len < 2) {
  1684. pr_vdebug("descriptor too short\n");
  1685. return -EINVAL;
  1686. }
1687. /* Do we have at least as many bytes as the descriptor takes? */
  1688. length = _ds->bLength;
  1689. if (len < length) {
  1690. pr_vdebug("descriptor longer then available data\n");
  1691. return -EINVAL;
  1692. }
  1693. #define __entity_check_INTERFACE(val) 1
  1694. #define __entity_check_STRING(val) (val)
  1695. #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
  1696. #define __entity(type, val) do { \
  1697. pr_vdebug("entity " #type "(%02x)\n", (val)); \
  1698. if (!__entity_check_ ##type(val)) { \
  1699. pr_vdebug("invalid entity's value\n"); \
  1700. return -EINVAL; \
  1701. } \
  1702. ret = entity(FFS_ ##type, &val, _ds, priv); \
  1703. if (ret < 0) { \
  1704. pr_debug("entity " #type "(%02x); ret = %d\n", \
  1705. (val), ret); \
  1706. return ret; \
  1707. } \
  1708. } while (0)
  1709. /* Parse descriptor depending on type. */
  1710. switch (_ds->bDescriptorType) {
  1711. case USB_DT_DEVICE:
  1712. case USB_DT_CONFIG:
  1713. case USB_DT_STRING:
  1714. case USB_DT_DEVICE_QUALIFIER:
  1715. /* function can't have any of those */
  1716. pr_vdebug("descriptor reserved for gadget: %d\n",
  1717. _ds->bDescriptorType);
  1718. return -EINVAL;
  1719. case USB_DT_INTERFACE: {
  1720. struct usb_interface_descriptor *ds = (void *)_ds;
  1721. pr_vdebug("interface descriptor\n");
  1722. if (length != sizeof *ds)
  1723. goto inv_length;
  1724. __entity(INTERFACE, ds->bInterfaceNumber);
  1725. if (ds->iInterface)
  1726. __entity(STRING, ds->iInterface);
  1727. *current_class = ds->bInterfaceClass;
  1728. }
  1729. break;
  1730. case USB_DT_ENDPOINT: {
  1731. struct usb_endpoint_descriptor *ds = (void *)_ds;
  1732. pr_vdebug("endpoint descriptor\n");
  1733. if (length != USB_DT_ENDPOINT_SIZE &&
  1734. length != USB_DT_ENDPOINT_AUDIO_SIZE)
  1735. goto inv_length;
  1736. __entity(ENDPOINT, ds->bEndpointAddress);
  1737. }
  1738. break;
  1739. case USB_TYPE_CLASS | 0x01:
  1740. if (*current_class == USB_INTERFACE_CLASS_HID) {
  1741. pr_vdebug("hid descriptor\n");
  1742. if (length != sizeof(struct hid_descriptor))
  1743. goto inv_length;
  1744. break;
  1745. } else if (*current_class == USB_INTERFACE_CLASS_CCID) {
  1746. pr_vdebug("ccid descriptor\n");
  1747. if (length != sizeof(struct ccid_descriptor))
  1748. goto inv_length;
  1749. break;
  1750. } else {
  1751. pr_vdebug("unknown descriptor: %d for class %d\n",
  1752. _ds->bDescriptorType, *current_class);
  1753. return -EINVAL;
  1754. }
  1755. case USB_DT_OTG:
  1756. if (length != sizeof(struct usb_otg_descriptor))
  1757. goto inv_length;
  1758. break;
  1759. case USB_DT_INTERFACE_ASSOCIATION: {
  1760. struct usb_interface_assoc_descriptor *ds = (void *)_ds;
  1761. pr_vdebug("interface association descriptor\n");
  1762. if (length != sizeof *ds)
  1763. goto inv_length;
  1764. if (ds->iFunction)
  1765. __entity(STRING, ds->iFunction);
  1766. }
  1767. break;
  1768. case USB_DT_SS_ENDPOINT_COMP:
  1769. pr_vdebug("EP SS companion descriptor\n");
  1770. if (length != sizeof(struct usb_ss_ep_comp_descriptor))
  1771. goto inv_length;
  1772. break;
  1773. case USB_DT_OTHER_SPEED_CONFIG:
  1774. case USB_DT_INTERFACE_POWER:
  1775. case USB_DT_DEBUG:
  1776. case USB_DT_SECURITY:
  1777. case USB_DT_CS_RADIO_CONTROL:
  1778. /* TODO */
  1779. pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
  1780. return -EINVAL;
  1781. default:
  1782. /* We should never be here */
  1783. pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
  1784. return -EINVAL;
  1785. inv_length:
  1786. pr_vdebug("invalid length: %d (descriptor %d)\n",
  1787. _ds->bLength, _ds->bDescriptorType);
  1788. return -EINVAL;
  1789. }
  1790. #undef __entity
  1791. #undef __entity_check_DESCRIPTOR
  1792. #undef __entity_check_INTERFACE
  1793. #undef __entity_check_STRING
  1794. #undef __entity_check_ENDPOINT
  1795. return length;
  1796. }
  1797. static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
  1798. ffs_entity_callback entity, void *priv)
  1799. {
  1800. const unsigned _len = len;
  1801. unsigned long num = 0;
  1802. int current_class = -1;
  1803. ENTER();
  1804. for (;;) {
  1805. int ret;
  1806. if (num == count)
  1807. data = NULL;
  1808. /* Record "descriptor" entity */
  1809. ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
  1810. if (ret < 0) {
  1811. pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
  1812. num, ret);
  1813. return ret;
  1814. }
  1815. if (!data)
  1816. return _len - len;
  1817. ret = ffs_do_single_desc(data, len, entity, priv,
  1818. &current_class);
  1819. if (ret < 0) {
  1820. pr_debug("%s returns %d\n", __func__, ret);
  1821. return ret;
  1822. }
  1823. len -= ret;
  1824. data += ret;
  1825. ++num;
  1826. }
  1827. }
  1828. static int __ffs_data_do_entity(enum ffs_entity_type type,
  1829. u8 *valuep, struct usb_descriptor_header *desc,
  1830. void *priv)
  1831. {
  1832. struct ffs_desc_helper *helper = priv;
  1833. struct usb_endpoint_descriptor *d;
  1834. ENTER();
  1835. switch (type) {
  1836. case FFS_DESCRIPTOR:
  1837. break;
  1838. case FFS_INTERFACE:
  1839. /*
  1840. * Interfaces are indexed from zero so if we
  1841. * encountered interface "n" then there are at least
  1842. * "n+1" interfaces.
  1843. */
  1844. if (*valuep >= helper->interfaces_count)
  1845. helper->interfaces_count = *valuep + 1;
  1846. break;
  1847. case FFS_STRING:
  1848. /*
  1849. * Strings are indexed from 1 (0 is reserved
  1850. * for languages list)
  1851. */
  1852. if (*valuep > helper->ffs->strings_count)
  1853. helper->ffs->strings_count = *valuep;
  1854. break;
  1855. case FFS_ENDPOINT:
  1856. d = (void *)desc;
  1857. helper->eps_count++;
  1858. if (helper->eps_count >= FFS_MAX_EPS_COUNT)
  1859. return -EINVAL;
  1860. /* Check if descriptors for any speed were already parsed */
  1861. if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
  1862. helper->ffs->eps_addrmap[helper->eps_count] =
  1863. d->bEndpointAddress;
  1864. else if (helper->ffs->eps_addrmap[helper->eps_count] !=
  1865. d->bEndpointAddress)
  1866. return -EINVAL;
  1867. break;
  1868. }
  1869. return 0;
  1870. }
  1871. static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
  1872. struct usb_os_desc_header *desc)
  1873. {
  1874. u16 bcd_version = le16_to_cpu(desc->bcdVersion);
  1875. u16 w_index = le16_to_cpu(desc->wIndex);
  1876. if (bcd_version != 1) {
  1877. pr_vdebug("unsupported os descriptors version: %d",
  1878. bcd_version);
  1879. return -EINVAL;
  1880. }
  1881. switch (w_index) {
  1882. case 0x4:
  1883. *next_type = FFS_OS_DESC_EXT_COMPAT;
  1884. break;
  1885. case 0x5:
  1886. *next_type = FFS_OS_DESC_EXT_PROP;
  1887. break;
  1888. default:
  1889. pr_vdebug("unsupported os descriptor type: %d", w_index);
  1890. return -EINVAL;
  1891. }
  1892. return sizeof(*desc);
  1893. }
  1894. /*
  1895. * Process all extended compatibility/extended property descriptors
  1896. * of a feature descriptor
  1897. */
  1898. static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
  1899. enum ffs_os_desc_type type,
  1900. u16 feature_count,
  1901. ffs_os_desc_callback entity,
  1902. void *priv,
  1903. struct usb_os_desc_header *h)
  1904. {
  1905. int ret;
  1906. const unsigned _len = len;
  1907. ENTER();
  1908. /* loop over all ext compat/ext prop descriptors */
  1909. while (feature_count--) {
  1910. ret = entity(type, h, data, len, priv);
  1911. if (ret < 0) {
  1912. pr_debug("bad OS descriptor, type: %d\n", type);
  1913. return ret;
  1914. }
  1915. data += ret;
  1916. len -= ret;
  1917. }
  1918. return _len - len;
  1919. }
  1920. /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
  1921. static int __must_check ffs_do_os_descs(unsigned count,
  1922. char *data, unsigned len,
  1923. ffs_os_desc_callback entity, void *priv)
  1924. {
  1925. const unsigned _len = len;
  1926. unsigned long num = 0;
  1927. ENTER();
  1928. for (num = 0; num < count; ++num) {
  1929. int ret;
  1930. enum ffs_os_desc_type type;
  1931. u16 feature_count;
  1932. struct usb_os_desc_header *desc = (void *)data;
  1933. if (len < sizeof(*desc))
  1934. return -EINVAL;
1935. /*
1936. * Record "descriptor" entity.
1937. * Process dwLength, bcdVersion, wIndex, get b/wCount.
1938. * Move the data pointer to the beginning of the extended
1939. * compatibility or extended property portion
1940. * of the data.
1941. */
  1942. if (le32_to_cpu(desc->dwLength) > len)
  1943. return -EINVAL;
  1944. ret = __ffs_do_os_desc_header(&type, desc);
  1945. if (ret < 0) {
  1946. pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
  1947. num, ret);
  1948. return ret;
  1949. }
1950. /*
1951. * For ext compat, wCount overlays an 8-bit bCount plus a reserved byte, so it must not exceed 255.
1952. */
  1953. feature_count = le16_to_cpu(desc->wCount);
  1954. if (type == FFS_OS_DESC_EXT_COMPAT &&
  1955. (feature_count > 255 || desc->Reserved))
  1956. return -EINVAL;
  1957. len -= ret;
  1958. data += ret;
  1959. /*
  1960. * Process all function/property descriptors
  1961. * of this Feature Descriptor
  1962. */
  1963. ret = ffs_do_single_os_desc(data, len, type,
  1964. feature_count, entity, priv, desc);
  1965. if (ret < 0) {
  1966. pr_debug("%s returns %d\n", __func__, ret);
  1967. return ret;
  1968. }
  1969. len -= ret;
  1970. data += ret;
  1971. }
  1972. return _len - len;
  1973. }
  1974. /*
  1975. * Validate contents of the buffer from userspace related to OS descriptors.
  1976. */
  1977. static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
  1978. struct usb_os_desc_header *h, void *data,
  1979. unsigned len, void *priv)
  1980. {
  1981. struct ffs_data *ffs = priv;
  1982. u8 length;
  1983. ENTER();
  1984. switch (type) {
  1985. case FFS_OS_DESC_EXT_COMPAT: {
  1986. struct usb_ext_compat_desc *d = data;
  1987. int i;
  1988. if (len < sizeof(*d) ||
  1989. d->bFirstInterfaceNumber >= ffs->interfaces_count)
  1990. return -EINVAL;
  1991. if (d->Reserved1 != 1) {
  1992. /*
  1993. * According to the spec, Reserved1 must be set to 1
  1994. * but older kernels incorrectly rejected non-zero
  1995. * values. We fix it here to avoid returning EINVAL
  1996. * in response to values we used to accept.
  1997. */
  1998. pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
  1999. d->Reserved1 = 1;
  2000. }
  2001. for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
  2002. if (d->Reserved2[i])
  2003. return -EINVAL;
  2004. length = sizeof(struct usb_ext_compat_desc);
  2005. }
  2006. break;
  2007. case FFS_OS_DESC_EXT_PROP: {
  2008. struct usb_ext_prop_desc *d = data;
  2009. u32 type, pdl;
  2010. u16 pnl;
  2011. if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
  2012. return -EINVAL;
  2013. length = le32_to_cpu(d->dwSize);
  2014. if (len < length)
  2015. return -EINVAL;
  2016. type = le32_to_cpu(d->dwPropertyDataType);
  2017. if (type < USB_EXT_PROP_UNICODE ||
  2018. type > USB_EXT_PROP_UNICODE_MULTI) {
  2019. pr_vdebug("unsupported os descriptor property type: %d",
  2020. type);
  2021. return -EINVAL;
  2022. }
  2023. pnl = le16_to_cpu(d->wPropertyNameLength);
  2024. if (length < 14 + pnl) {
  2025. pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
  2026. length, pnl, type);
  2027. return -EINVAL;
  2028. }
  2029. pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
  2030. if (length != 14 + pnl + pdl) {
  2031. pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
  2032. length, pnl, pdl, type);
  2033. return -EINVAL;
  2034. }
  2035. ++ffs->ms_os_descs_ext_prop_count;
  2036. /* property name reported to the host as "WCHAR"s */
  2037. ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
  2038. ffs->ms_os_descs_ext_prop_data_len += pdl;
  2039. }
  2040. break;
  2041. default:
  2042. pr_vdebug("unknown descriptor: %d\n", type);
  2043. return -EINVAL;
  2044. }
  2045. return length;
  2046. }
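/*
 * Layout of the descriptors blob as parsed below (little-endian 32-bit
 * fields unless noted otherwise):
 *
 *   magic, length[, flags]        length must equal the blob size; flags
 *                                 exist only with DESCRIPTORS_MAGIC_V2
 *   [eventfd]                     only if FUNCTIONFS_EVENTFD is set
 *   fs_count, hs_count, ss_count  each present only if the corresponding
 *                                 FUNCTIONFS_HAS_*_DESC flag is set
 *   [os_desc_count]               only if FUNCTIONFS_HAS_MS_OS_DESC is set
 *   raw descriptors               fs, then hs, then ss, then OS descriptors
 */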
  2047. static int __ffs_data_got_descs(struct ffs_data *ffs,
  2048. char *const _data, size_t len)
  2049. {
  2050. char *data = _data, *raw_descs;
  2051. unsigned os_descs_count = 0, counts[3], flags;
  2052. int ret = -EINVAL, i;
  2053. struct ffs_desc_helper helper;
  2054. ENTER();
  2055. if (get_unaligned_le32(data + 4) != len)
  2056. goto error;
  2057. switch (get_unaligned_le32(data)) {
  2058. case FUNCTIONFS_DESCRIPTORS_MAGIC:
  2059. flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
  2060. data += 8;
  2061. len -= 8;
  2062. break;
  2063. case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
  2064. flags = get_unaligned_le32(data + 8);
  2065. ffs->user_flags = flags;
  2066. if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
  2067. FUNCTIONFS_HAS_HS_DESC |
  2068. FUNCTIONFS_HAS_SS_DESC |
  2069. FUNCTIONFS_HAS_MS_OS_DESC |
  2070. FUNCTIONFS_VIRTUAL_ADDR |
  2071. FUNCTIONFS_EVENTFD |
  2072. FUNCTIONFS_ALL_CTRL_RECIP |
  2073. FUNCTIONFS_CONFIG0_SETUP)) {
  2074. ret = -ENOSYS;
  2075. goto error;
  2076. }
  2077. data += 12;
  2078. len -= 12;
  2079. break;
  2080. default:
  2081. goto error;
  2082. }
  2083. if (flags & FUNCTIONFS_EVENTFD) {
  2084. if (len < 4)
  2085. goto error;
  2086. ffs->ffs_eventfd =
  2087. eventfd_ctx_fdget((int)get_unaligned_le32(data));
  2088. if (IS_ERR(ffs->ffs_eventfd)) {
  2089. ret = PTR_ERR(ffs->ffs_eventfd);
  2090. ffs->ffs_eventfd = NULL;
  2091. goto error;
  2092. }
  2093. data += 4;
  2094. len -= 4;
  2095. }
  2096. /* Read fs_count, hs_count and ss_count (if present) */
  2097. for (i = 0; i < 3; ++i) {
  2098. if (!(flags & (1 << i))) {
  2099. counts[i] = 0;
  2100. } else if (len < 4) {
  2101. goto error;
  2102. } else {
  2103. counts[i] = get_unaligned_le32(data);
  2104. data += 4;
  2105. len -= 4;
  2106. }
  2107. }
  2108. if (flags & (1 << i)) {
  2109. if (len < 4) {
  2110. goto error;
  2111. }
  2112. os_descs_count = get_unaligned_le32(data);
  2113. data += 4;
  2114. len -= 4;
  2115. }
  2116. /* Read descriptors */
  2117. raw_descs = data;
  2118. helper.ffs = ffs;
  2119. for (i = 0; i < 3; ++i) {
  2120. if (!counts[i])
  2121. continue;
  2122. helper.interfaces_count = 0;
  2123. helper.eps_count = 0;
  2124. ret = ffs_do_descs(counts[i], data, len,
  2125. __ffs_data_do_entity, &helper);
  2126. if (ret < 0)
  2127. goto error;
  2128. if (!ffs->eps_count && !ffs->interfaces_count) {
  2129. ffs->eps_count = helper.eps_count;
  2130. ffs->interfaces_count = helper.interfaces_count;
  2131. } else {
  2132. if (ffs->eps_count != helper.eps_count) {
  2133. ret = -EINVAL;
  2134. goto error;
  2135. }
  2136. if (ffs->interfaces_count != helper.interfaces_count) {
  2137. ret = -EINVAL;
  2138. goto error;
  2139. }
  2140. }
  2141. data += ret;
  2142. len -= ret;
  2143. }
  2144. if (os_descs_count) {
  2145. ret = ffs_do_os_descs(os_descs_count, data, len,
  2146. __ffs_data_do_os_desc, ffs);
  2147. if (ret < 0)
  2148. goto error;
  2149. data += ret;
  2150. len -= ret;
  2151. }
  2152. if (raw_descs == data || len) {
  2153. ret = -EINVAL;
  2154. goto error;
  2155. }
  2156. ffs->raw_descs_data = _data;
  2157. ffs->raw_descs = raw_descs;
  2158. ffs->raw_descs_length = data - raw_descs;
  2159. ffs->fs_descs_count = counts[0];
  2160. ffs->hs_descs_count = counts[1];
  2161. ffs->ss_descs_count = counts[2];
  2162. ffs->ms_os_descs_count = os_descs_count;
  2163. return 0;
  2164. error:
  2165. kfree(_data);
  2166. return ret;
  2167. }
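/*
 * Layout of the strings blob as parsed below:
 *
 *   magic, length, str_count, lang_count   four le32 values
 *   then, for each language:
 *     a le16 language id followed by str_count NUL-terminated strings
 *
 * No trailing data may be left over.
 */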
  2168. static int __ffs_data_got_strings(struct ffs_data *ffs,
  2169. char *const _data, size_t len)
  2170. {
  2171. u32 str_count, needed_count, lang_count;
  2172. struct usb_gadget_strings **stringtabs, *t;
  2173. const char *data = _data;
  2174. struct usb_string *s;
  2175. ENTER();
  2176. if (len < 16 ||
  2177. get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
  2178. get_unaligned_le32(data + 4) != len)
  2179. goto error;
  2180. str_count = get_unaligned_le32(data + 8);
  2181. lang_count = get_unaligned_le32(data + 12);
  2182. /* if one is zero the other must be zero */
  2183. if (!str_count != !lang_count)
  2184. goto error;
  2185. /* Do we have at least as many strings as descriptors need? */
  2186. needed_count = ffs->strings_count;
  2187. if (str_count < needed_count)
  2188. goto error;
  2189. /*
  2190. * If we don't need any strings just return and free all
  2191. * memory.
  2192. */
  2193. if (!needed_count) {
  2194. kfree(_data);
  2195. return 0;
  2196. }
  2197. /* Allocate everything in one chunk so there's less maintenance. */
  2198. {
  2199. unsigned i = 0;
  2200. vla_group(d);
  2201. vla_item(d, struct usb_gadget_strings *, stringtabs,
  2202. size_add(lang_count, 1));
  2203. vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
  2204. vla_item(d, struct usb_string, strings,
  2205. size_mul(lang_count, (needed_count + 1)));
  2206. char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
  2207. if (!vlabuf) {
  2208. kfree(_data);
  2209. return -ENOMEM;
  2210. }
  2211. /* Initialize the VLA pointers */
  2212. stringtabs = vla_ptr(vlabuf, d, stringtabs);
  2213. t = vla_ptr(vlabuf, d, stringtab);
  2214. i = lang_count;
  2215. do {
  2216. *stringtabs++ = t++;
  2217. } while (--i);
  2218. *stringtabs = NULL;
  2219. /* stringtabs = vlabuf = d_stringtabs for later kfree */
  2220. stringtabs = vla_ptr(vlabuf, d, stringtabs);
  2221. t = vla_ptr(vlabuf, d, stringtab);
  2222. s = vla_ptr(vlabuf, d, strings);
  2223. }
  2224. /* For each language */
  2225. data += 16;
  2226. len -= 16;
  2227. do { /* lang_count > 0 so we can use do-while */
  2228. unsigned needed = needed_count;
  2229. u32 str_per_lang = str_count;
  2230. if (len < 3)
  2231. goto error_free;
  2232. t->language = get_unaligned_le16(data);
  2233. t->strings = s;
  2234. ++t;
  2235. data += 2;
  2236. len -= 2;
  2237. /* For each string */
  2238. do { /* str_count > 0 so we can use do-while */
  2239. size_t length = strnlen(data, len);
  2240. if (length == len)
  2241. goto error_free;
2242. /*
2243. * The user may provide more strings than we need;
2244. * if that's the case we simply ignore the
2245. * rest.
2246. */
  2247. if (needed) {
  2248. /*
  2249. * s->id will be set while adding
  2250. * function to configuration so for
  2251. * now just leave garbage here.
  2252. */
  2253. s->s = data;
  2254. --needed;
  2255. ++s;
  2256. }
  2257. data += length + 1;
  2258. len -= length + 1;
  2259. } while (--str_per_lang);
  2260. s->id = 0; /* terminator */
  2261. s->s = NULL;
  2262. ++s;
  2263. } while (--lang_count);
  2264. /* Some garbage left? */
  2265. if (len)
  2266. goto error_free;
  2267. /* Done! */
  2268. ffs->stringtabs = stringtabs;
  2269. ffs->raw_strings = _data;
  2270. return 0;
  2271. error_free:
  2272. kfree(stringtabs);
  2273. error:
  2274. kfree(_data);
  2275. return -EINVAL;
  2276. }
  2277. /* Events handling and management *******************************************/
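/*
 * __ffs_event_add() must be called with ffs->ev.waitq.lock held (it uses
 * wake_up_locked()); ffs_event_add() further below is the wrapper that
 * takes the lock itself.
 */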
  2278. static void __ffs_event_add(struct ffs_data *ffs,
  2279. enum usb_functionfs_event_type type)
  2280. {
  2281. enum usb_functionfs_event_type rem_type1, rem_type2 = type;
  2282. int neg = 0;
  2283. /*
  2284. * Abort any unhandled setup
  2285. *
  2286. * We do not need to worry about some cmpxchg() changing value
  2287. * of ffs->setup_state without holding the lock because when
  2288. * state is FFS_SETUP_PENDING cmpxchg() in several places in
  2289. * the source does nothing.
  2290. */
  2291. if (ffs->setup_state == FFS_SETUP_PENDING)
  2292. ffs->setup_state = FFS_SETUP_CANCELLED;
2293. /*
2294. * The logic of this function guarantees that there are at most four pending
2295. * events on the ffs->ev.types queue. This is important because the queue
2296. * has space for four elements only and the __ffs_ep0_read_events function
2297. * depends on that limit as well. If more event types are added, those
2298. * limits have to be revisited or guaranteed to still hold.
2299. */
  2300. switch (type) {
  2301. case FUNCTIONFS_RESUME:
  2302. rem_type2 = FUNCTIONFS_SUSPEND;
  2303. fallthrough;
  2304. case FUNCTIONFS_SUSPEND:
  2305. case FUNCTIONFS_SETUP:
  2306. rem_type1 = type;
  2307. /* Discard all similar events */
  2308. break;
  2309. case FUNCTIONFS_BIND:
  2310. case FUNCTIONFS_UNBIND:
  2311. case FUNCTIONFS_DISABLE:
  2312. case FUNCTIONFS_ENABLE:
2313. /* Discard everything other than power management. */
  2314. rem_type1 = FUNCTIONFS_SUSPEND;
  2315. rem_type2 = FUNCTIONFS_RESUME;
  2316. neg = 1;
  2317. break;
  2318. default:
  2319. WARN(1, "%d: unknown event, this should not happen\n", type);
  2320. return;
  2321. }
  2322. {
  2323. u8 *ev = ffs->ev.types, *out = ev;
  2324. unsigned n = ffs->ev.count;
  2325. for (; n; --n, ++ev)
  2326. if ((*ev == rem_type1 || *ev == rem_type2) == neg)
  2327. *out++ = *ev;
  2328. else
  2329. pr_vdebug("purging event %d\n", *ev);
  2330. ffs->ev.count = out - ffs->ev.types;
  2331. }
  2332. pr_vdebug("adding event %d\n", type);
  2333. ffs->ev.types[ffs->ev.count++] = type;
  2334. wake_up_locked(&ffs->ev.waitq);
  2335. if (ffs->ffs_eventfd)
  2336. eventfd_signal(ffs->ffs_eventfd, 1);
  2337. }
  2338. static void ffs_event_add(struct ffs_data *ffs,
  2339. enum usb_functionfs_event_type type)
  2340. {
  2341. unsigned long flags;
  2342. spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
  2343. __ffs_event_add(ffs, type);
  2344. spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
  2345. }
  2346. /* Bind/unbind USB function hooks *******************************************/
  2347. static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
  2348. {
  2349. int i;
  2350. for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
  2351. if (ffs->eps_addrmap[i] == endpoint_address)
  2352. return i;
  2353. return -ENOENT;
  2354. }
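/*
 * First pass over the descriptors during bind: store each descriptor
 * pointer in the fs/hs/ss descriptor array currently being built and, for
 * endpoints not yet claimed in an earlier speed pass, grab a usb_ep via
 * usb_ep_autoconfig() and preallocate its request.
 */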
  2355. static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
  2356. struct usb_descriptor_header *desc,
  2357. void *priv)
  2358. {
  2359. struct usb_endpoint_descriptor *ds = (void *)desc;
  2360. struct ffs_function *func = priv;
  2361. struct ffs_ep *ffs_ep;
  2362. unsigned ep_desc_id;
  2363. int idx;
  2364. static const char *speed_names[] = { "full", "high", "super" };
  2365. if (type != FFS_DESCRIPTOR)
  2366. return 0;
  2367. /*
  2368. * If ss_descriptors is not NULL, we are reading super speed
  2369. * descriptors; if hs_descriptors is not NULL, we are reading high
  2370. * speed descriptors; otherwise, we are reading full speed
  2371. * descriptors.
  2372. */
  2373. if (func->function.ss_descriptors) {
  2374. ep_desc_id = 2;
  2375. func->function.ss_descriptors[(long)valuep] = desc;
  2376. } else if (func->function.hs_descriptors) {
  2377. ep_desc_id = 1;
  2378. func->function.hs_descriptors[(long)valuep] = desc;
  2379. } else {
  2380. ep_desc_id = 0;
  2381. func->function.fs_descriptors[(long)valuep] = desc;
  2382. }
  2383. if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  2384. return 0;
  2385. idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
  2386. if (idx < 0)
  2387. return idx;
  2388. ffs_ep = func->eps + idx;
  2389. if (ffs_ep->descs[ep_desc_id]) {
  2390. pr_err("two %sspeed descriptors for EP %d\n",
  2391. speed_names[ep_desc_id],
  2392. ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  2393. return -EINVAL;
  2394. }
  2395. ffs_ep->descs[ep_desc_id] = ds;
  2396. ffs_dump_mem(": Original ep desc", ds, ds->bLength);
  2397. if (ffs_ep->ep) {
  2398. ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
  2399. if (!ds->wMaxPacketSize)
  2400. ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
  2401. } else {
  2402. struct usb_request *req;
  2403. struct usb_ep *ep;
  2404. u8 bEndpointAddress;
  2405. u16 wMaxPacketSize;
  2406. /*
  2407. * We back up bEndpointAddress because autoconfig overwrites
  2408. * it with physical endpoint address.
  2409. */
  2410. bEndpointAddress = ds->bEndpointAddress;
  2411. /*
  2412. * We back up wMaxPacketSize because autoconfig treats
  2413. * endpoint descriptors as if they were full speed.
  2414. */
  2415. wMaxPacketSize = ds->wMaxPacketSize;
  2416. pr_vdebug("autoconfig\n");
  2417. ep = usb_ep_autoconfig(func->gadget, ds);
  2418. if (!ep)
  2419. return -ENOTSUPP;
  2420. ep->driver_data = func->eps + idx;
  2421. req = usb_ep_alloc_request(ep, GFP_KERNEL);
  2422. if (!req)
  2423. return -ENOMEM;
  2424. ffs_ep->ep = ep;
  2425. ffs_ep->req = req;
  2426. func->eps_revmap[ds->bEndpointAddress &
  2427. USB_ENDPOINT_NUMBER_MASK] = idx + 1;
  2428. /*
  2429. * If we use virtual address mapping, we restore
  2430. * original bEndpointAddress value.
  2431. */
  2432. if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
  2433. ds->bEndpointAddress = bEndpointAddress;
  2434. /*
  2435. * Restore wMaxPacketSize which was potentially
  2436. * overwritten by autoconfig.
  2437. */
  2438. ds->wMaxPacketSize = wMaxPacketSize;
  2439. }
  2440. ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
  2441. return 0;
  2442. }
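/*
 * Second pass over the descriptors during bind: rewrite interface numbers,
 * string IDs and endpoint addresses in the copied descriptors to the
 * values actually allocated for this configuration.
 */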
  2443. static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
  2444. struct usb_descriptor_header *desc,
  2445. void *priv)
  2446. {
  2447. struct ffs_function *func = priv;
  2448. unsigned idx;
  2449. u8 newValue;
  2450. switch (type) {
  2451. default:
  2452. case FFS_DESCRIPTOR:
  2453. /* Handled in previous pass by __ffs_func_bind_do_descs() */
  2454. return 0;
  2455. case FFS_INTERFACE:
  2456. idx = *valuep;
  2457. if (func->interfaces_nums[idx] < 0) {
  2458. int id = usb_interface_id(func->conf, &func->function);
  2459. if (id < 0)
  2460. return id;
  2461. func->interfaces_nums[idx] = id;
  2462. }
  2463. newValue = func->interfaces_nums[idx];
  2464. break;
  2465. case FFS_STRING:
2466. /* Strings' IDs are allocated when ffs_data is bound to the cdev */
  2467. newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
  2468. break;
  2469. case FFS_ENDPOINT:
  2470. /*
  2471. * USB_DT_ENDPOINT are handled in
  2472. * __ffs_func_bind_do_descs().
  2473. */
  2474. if (desc->bDescriptorType == USB_DT_ENDPOINT)
  2475. return 0;
  2476. idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
  2477. if (!func->eps[idx].ep)
  2478. return -EINVAL;
  2479. {
  2480. struct usb_endpoint_descriptor **descs;
  2481. descs = func->eps[idx].descs;
  2482. newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
  2483. }
  2484. break;
  2485. }
  2486. pr_vdebug("%02x -> %02x\n", *valuep, newValue);
  2487. *valuep = newValue;
  2488. return 0;
  2489. }
  2490. static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
  2491. struct usb_os_desc_header *h, void *data,
  2492. unsigned len, void *priv)
  2493. {
  2494. struct ffs_function *func = priv;
  2495. u8 length = 0;
  2496. switch (type) {
  2497. case FFS_OS_DESC_EXT_COMPAT: {
  2498. struct usb_ext_compat_desc *desc = data;
  2499. struct usb_os_desc_table *t;
  2500. t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
  2501. t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
  2502. memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
  2503. ARRAY_SIZE(desc->CompatibleID) +
  2504. ARRAY_SIZE(desc->SubCompatibleID));
  2505. length = sizeof(*desc);
  2506. }
  2507. break;
  2508. case FFS_OS_DESC_EXT_PROP: {
  2509. struct usb_ext_prop_desc *desc = data;
  2510. struct usb_os_desc_table *t;
  2511. struct usb_os_desc_ext_prop *ext_prop;
  2512. char *ext_prop_name;
  2513. char *ext_prop_data;
  2514. t = &func->function.os_desc_table[h->interface];
  2515. t->if_id = func->interfaces_nums[h->interface];
  2516. ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
  2517. func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
  2518. ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
  2519. ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
  2520. ext_prop->data_len = le32_to_cpu(*(__le32 *)
  2521. usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
  2522. length = ext_prop->name_len + ext_prop->data_len + 14;
  2523. ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
  2524. func->ffs->ms_os_descs_ext_prop_name_avail +=
  2525. ext_prop->name_len;
  2526. ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
  2527. func->ffs->ms_os_descs_ext_prop_data_avail +=
  2528. ext_prop->data_len;
  2529. memcpy(ext_prop_data,
  2530. usb_ext_prop_data_ptr(data, ext_prop->name_len),
  2531. ext_prop->data_len);
  2532. /* unicode data reported to the host as "WCHAR"s */
  2533. switch (ext_prop->type) {
  2534. case USB_EXT_PROP_UNICODE:
  2535. case USB_EXT_PROP_UNICODE_ENV:
  2536. case USB_EXT_PROP_UNICODE_LINK:
  2537. case USB_EXT_PROP_UNICODE_MULTI:
  2538. ext_prop->data_len *= 2;
  2539. break;
  2540. }
  2541. ext_prop->data = ext_prop_data;
  2542. memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
  2543. ext_prop->name_len);
  2544. /* property name reported to the host as "WCHAR"s */
  2545. ext_prop->name_len *= 2;
  2546. ext_prop->name = ext_prop_name;
  2547. t->os_desc->ext_prop_len +=
  2548. ext_prop->name_len + ext_prop->data_len + 14;
  2549. ++t->os_desc->ext_prop_count;
  2550. list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
  2551. }
  2552. break;
  2553. default:
  2554. pr_vdebug("unknown descriptor: %d\n", type);
  2555. }
  2556. return length;
  2557. }
  2558. static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
  2559. struct usb_configuration *c)
  2560. {
  2561. struct ffs_function *func = ffs_func_from_usb(f);
  2562. struct f_fs_opts *ffs_opts =
  2563. container_of(f->fi, struct f_fs_opts, func_inst);
  2564. struct ffs_data *ffs_data;
  2565. int ret;
  2566. ENTER();
  2567. /*
  2568. * Legacy gadget triggers binding in functionfs_ready_callback,
  2569. * which already uses locking; taking the same lock here would
  2570. * cause a deadlock.
  2571. *
  2572. * Configfs-enabled gadgets however do need ffs_dev_lock.
  2573. */
  2574. if (!ffs_opts->no_configfs)
  2575. ffs_dev_lock();
  2576. ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
  2577. ffs_data = ffs_opts->dev->ffs_data;
  2578. if (!ffs_opts->no_configfs)
  2579. ffs_dev_unlock();
  2580. if (ret)
  2581. return ERR_PTR(ret);
  2582. func->ffs = ffs_data;
  2583. func->conf = c;
  2584. func->gadget = c->cdev->gadget;
  2585. /*
  2586. * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
  2587. * configurations are bound in sequence with list_for_each_entry,
  2588. * in each configuration its functions are bound in sequence
  2589. * with list_for_each_entry, so we assume no race condition
  2590. * with regard to ffs_opts->bound access
  2591. */
  2592. if (!ffs_opts->refcnt) {
  2593. ret = functionfs_bind(func->ffs, c->cdev);
  2594. if (ret)
  2595. return ERR_PTR(ret);
  2596. }
  2597. ffs_opts->refcnt++;
  2598. func->function.strings = func->ffs->stringtabs;
  2599. return ffs_opts;
  2600. }
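/*
 * The actual bind work: endpoint state, the per-speed descriptor arrays,
 * the interface number map, optional OS descriptor tables and a copy of
 * the raw descriptors are allocated as one vlabuf chunk; the raw
 * descriptors are then walked once per provided speed with
 * __ffs_func_bind_do_descs() and once more with __ffs_func_bind_do_nums().
 */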
  2601. static int _ffs_func_bind(struct usb_configuration *c,
  2602. struct usb_function *f)
  2603. {
  2604. struct ffs_function *func = ffs_func_from_usb(f);
  2605. struct ffs_data *ffs = func->ffs;
  2606. const int full = !!func->ffs->fs_descs_count;
  2607. const int high = !!func->ffs->hs_descs_count;
  2608. const int super = !!func->ffs->ss_descs_count;
  2609. int fs_len, hs_len, ss_len, ret, i;
  2610. struct ffs_ep *eps_ptr;
  2611. /* Make it a single chunk, less management later on */
  2612. vla_group(d);
  2613. vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
  2614. vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
  2615. full ? ffs->fs_descs_count + 1 : 0);
  2616. vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
  2617. high ? ffs->hs_descs_count + 1 : 0);
  2618. vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
  2619. super ? ffs->ss_descs_count + 1 : 0);
  2620. vla_item_with_sz(d, short, inums, ffs->interfaces_count);
  2621. vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
  2622. c->cdev->use_os_string ? ffs->interfaces_count : 0);
  2623. vla_item_with_sz(d, char[16], ext_compat,
  2624. c->cdev->use_os_string ? ffs->interfaces_count : 0);
  2625. vla_item_with_sz(d, struct usb_os_desc, os_desc,
  2626. c->cdev->use_os_string ? ffs->interfaces_count : 0);
  2627. vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
  2628. ffs->ms_os_descs_ext_prop_count);
  2629. vla_item_with_sz(d, char, ext_prop_name,
  2630. ffs->ms_os_descs_ext_prop_name_len);
  2631. vla_item_with_sz(d, char, ext_prop_data,
  2632. ffs->ms_os_descs_ext_prop_data_len);
  2633. vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
  2634. char *vlabuf;
  2635. ENTER();
2636. /* The function provided no descriptors for any speed */
  2637. if (!(full | high | super))
  2638. return -ENOTSUPP;
  2639. /* Allocate a single chunk, less management later on */
  2640. vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
  2641. if (!vlabuf)
  2642. return -ENOMEM;
  2643. ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
  2644. ffs->ms_os_descs_ext_prop_name_avail =
  2645. vla_ptr(vlabuf, d, ext_prop_name);
  2646. ffs->ms_os_descs_ext_prop_data_avail =
  2647. vla_ptr(vlabuf, d, ext_prop_data);
  2648. /* Copy descriptors */
  2649. memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
  2650. ffs->raw_descs_length);
  2651. memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
  2652. eps_ptr = vla_ptr(vlabuf, d, eps);
  2653. for (i = 0; i < ffs->eps_count; i++)
  2654. eps_ptr[i].num = -1;
2655. /* Save pointers:
2656. * d_eps == vlabuf, so func->eps is later used to kfree the whole vlabuf
2657. */
  2658. func->eps = vla_ptr(vlabuf, d, eps);
  2659. func->interfaces_nums = vla_ptr(vlabuf, d, inums);
2660. /*
2661. * Go through all the endpoint descriptors and allocate
2662. * endpoints first, so that later we can rewrite the endpoint
2663. * numbers without worrying that an endpoint may be described again later on.
2664. */
  2665. if (full) {
  2666. func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
  2667. fs_len = ffs_do_descs(ffs->fs_descs_count,
  2668. vla_ptr(vlabuf, d, raw_descs),
  2669. d_raw_descs__sz,
  2670. __ffs_func_bind_do_descs, func);
  2671. if (fs_len < 0) {
  2672. ret = fs_len;
  2673. goto error;
  2674. }
  2675. } else {
  2676. fs_len = 0;
  2677. }
  2678. if (high) {
  2679. func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
  2680. hs_len = ffs_do_descs(ffs->hs_descs_count,
  2681. vla_ptr(vlabuf, d, raw_descs) + fs_len,
  2682. d_raw_descs__sz - fs_len,
  2683. __ffs_func_bind_do_descs, func);
  2684. if (hs_len < 0) {
  2685. ret = hs_len;
  2686. goto error;
  2687. }
  2688. } else {
  2689. hs_len = 0;
  2690. }
  2691. if (super) {
  2692. func->function.ss_descriptors = func->function.ssp_descriptors =
  2693. vla_ptr(vlabuf, d, ss_descs);
  2694. ss_len = ffs_do_descs(ffs->ss_descs_count,
  2695. vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
  2696. d_raw_descs__sz - fs_len - hs_len,
  2697. __ffs_func_bind_do_descs, func);
  2698. if (ss_len < 0) {
  2699. ret = ss_len;
  2700. goto error;
  2701. }
  2702. } else {
  2703. ss_len = 0;
  2704. }
  2705. /*
  2706. * Now handle interface numbers allocation and interface and
  2707. * endpoint numbers rewriting. We can do that in one go
  2708. * now.
  2709. */
  2710. ret = ffs_do_descs(ffs->fs_descs_count +
  2711. (high ? ffs->hs_descs_count : 0) +
  2712. (super ? ffs->ss_descs_count : 0),
  2713. vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
  2714. __ffs_func_bind_do_nums, func);
  2715. if (ret < 0)
  2716. goto error;
  2717. func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
  2718. if (c->cdev->use_os_string) {
  2719. for (i = 0; i < ffs->interfaces_count; ++i) {
  2720. struct usb_os_desc *desc;
  2721. desc = func->function.os_desc_table[i].os_desc =
  2722. vla_ptr(vlabuf, d, os_desc) +
  2723. i * sizeof(struct usb_os_desc);
  2724. desc->ext_compat_id =
  2725. vla_ptr(vlabuf, d, ext_compat) + i * 16;
  2726. INIT_LIST_HEAD(&desc->ext_prop);
  2727. }
  2728. ret = ffs_do_os_descs(ffs->ms_os_descs_count,
  2729. vla_ptr(vlabuf, d, raw_descs) +
  2730. fs_len + hs_len + ss_len,
  2731. d_raw_descs__sz - fs_len - hs_len -
  2732. ss_len,
  2733. __ffs_func_bind_do_os_desc, func);
  2734. if (ret < 0)
  2735. goto error;
  2736. }
  2737. func->function.os_desc_n =
  2738. c->cdev->use_os_string ? ffs->interfaces_count : 0;
  2739. /* And we're done */
  2740. ffs_event_add(ffs, FUNCTIONFS_BIND);
  2741. return 0;
  2742. error:
  2743. /* XXX Do we need to release all claimed endpoints here? */
  2744. return ret;
  2745. }
  2746. static int ffs_func_bind(struct usb_configuration *c,
  2747. struct usb_function *f)
  2748. {
  2749. struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
  2750. struct ffs_function *func = ffs_func_from_usb(f);
  2751. int ret;
  2752. if (IS_ERR(ffs_opts))
  2753. return PTR_ERR(ffs_opts);
  2754. ret = _ffs_func_bind(c, f);
  2755. if (ret && !--ffs_opts->refcnt)
  2756. functionfs_unbind(func->ffs);
  2757. return ret;
  2758. }
  2759. /* Other USB function hooks *************************************************/
  2760. static void ffs_reset_work(struct work_struct *work)
  2761. {
  2762. struct ffs_data *ffs = container_of(work,
  2763. struct ffs_data, reset_work);
  2764. ffs_data_reset(ffs);
  2765. }
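/*
 * Set-alternate-setting hook.  alt == (unsigned)-1 is used internally to
 * mean "disable the function" (see ffs_func_disable() below); otherwise the
 * endpoints are (re)enabled and an ENABLE event is queued for userspace.
 */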
  2766. static int ffs_func_set_alt(struct usb_function *f,
  2767. unsigned interface, unsigned alt)
  2768. {
  2769. struct ffs_function *func = ffs_func_from_usb(f);
  2770. struct ffs_data *ffs = func->ffs;
  2771. int ret = 0, intf;
  2772. if (alt != (unsigned)-1) {
  2773. intf = ffs_func_revmap_intf(func, interface);
  2774. if (intf < 0)
  2775. return intf;
  2776. }
  2777. if (ffs->func)
  2778. ffs_func_eps_disable(ffs->func);
  2779. if (ffs->state == FFS_DEACTIVATED) {
  2780. ffs->state = FFS_CLOSING;
  2781. INIT_WORK(&ffs->reset_work, ffs_reset_work);
  2782. schedule_work(&ffs->reset_work);
  2783. return -ENODEV;
  2784. }
  2785. if (ffs->state != FFS_ACTIVE)
  2786. return -ENODEV;
  2787. if (alt == (unsigned)-1) {
  2788. ffs->func = NULL;
  2789. ffs_event_add(ffs, FUNCTIONFS_DISABLE);
  2790. return 0;
  2791. }
  2792. ffs->func = func;
  2793. ret = ffs_func_eps_enable(func);
  2794. if (ret >= 0)
  2795. ffs_event_add(ffs, FUNCTIONFS_ENABLE);
  2796. return ret;
  2797. }
  2798. static void ffs_func_disable(struct usb_function *f)
  2799. {
  2800. ffs_func_set_alt(f, 0, (unsigned)-1);
  2801. }
  2802. static int ffs_func_setup(struct usb_function *f,
  2803. const struct usb_ctrlrequest *creq)
  2804. {
  2805. struct ffs_function *func = ffs_func_from_usb(f);
  2806. struct ffs_data *ffs = func->ffs;
  2807. unsigned long flags;
  2808. int ret;
  2809. ENTER();
  2810. pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
  2811. pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
  2812. pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
  2813. pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
  2814. pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
2815. /*
2816. * Most requests directed to an interface go through here
2817. * (notable exceptions are set/get interface), so we need to
2818. * handle them. All others are either handled by the composite layer
2819. * or passed to usb_configuration->setup() (if one is set). In any
2820. * case, we also handle requests directed to an endpoint here
2821. * (as it's straightforward). Other request recipient
2822. * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
2823. * is set.
2824. */
  2825. if (ffs->state != FFS_ACTIVE)
  2826. return -ENODEV;
  2827. switch (creq->bRequestType & USB_RECIP_MASK) {
  2828. case USB_RECIP_INTERFACE:
  2829. ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
  2830. if (ret < 0)
  2831. return ret;
  2832. break;
  2833. case USB_RECIP_ENDPOINT:
  2834. ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
  2835. if (ret < 0)
  2836. return ret;
  2837. if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
  2838. ret = func->ffs->eps_addrmap[ret];
  2839. break;
  2840. default:
  2841. if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
  2842. ret = le16_to_cpu(creq->wIndex);
  2843. else
  2844. return -EOPNOTSUPP;
  2845. }
  2846. spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
  2847. ffs->ev.setup = *creq;
  2848. ffs->ev.setup.wIndex = cpu_to_le16(ret);
  2849. __ffs_event_add(ffs, FUNCTIONFS_SETUP);
  2850. spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
  2851. return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
  2852. }
  2853. static bool ffs_func_req_match(struct usb_function *f,
  2854. const struct usb_ctrlrequest *creq,
  2855. bool config0)
  2856. {
  2857. struct ffs_function *func = ffs_func_from_usb(f);
  2858. if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
  2859. return false;
  2860. switch (creq->bRequestType & USB_RECIP_MASK) {
  2861. case USB_RECIP_INTERFACE:
  2862. return (ffs_func_revmap_intf(func,
  2863. le16_to_cpu(creq->wIndex)) >= 0);
  2864. case USB_RECIP_ENDPOINT:
  2865. return (ffs_func_revmap_ep(func,
  2866. le16_to_cpu(creq->wIndex)) >= 0);
  2867. default:
  2868. return (bool) (func->ffs->user_flags &
  2869. FUNCTIONFS_ALL_CTRL_RECIP);
  2870. }
  2871. }

static void ffs_func_suspend(struct usb_function *f)
{
	ENTER();
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
}

static void ffs_func_resume(struct usb_function *f)
{
	ENTER();
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
}

/* Endpoint and interface numbers reverse mapping ***************************/
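
/*
 * These helpers translate the interface/endpoint numbers seen on the wire
 * back into this function's own (descriptor) numbering: eps_revmap entries
 * are 1-based with 0 meaning "not mapped", and unused interfaces_nums
 * slots stay negative.
 */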
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
	return num ? num : -EDOM;
}

static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
	short *nums = func->interfaces_nums;
	unsigned count = func->ffs->interfaces_count;

	for (; count; --count, ++nums) {
		if (*nums >= 0 && *nums == intf)
			return nums - func->interfaces_nums;
	}

	return -EDOM;
}

/* Devices management *******************************************************/
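
/*
 * Global list of registered ffs devices; the _ffs_*() helpers below expect
 * ffs_lock to be held, which callers take via ffs_dev_lock() and
 * ffs_dev_unlock().
 */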
static LIST_HEAD(ffs_devices);

static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
	struct ffs_dev *dev;

	if (!name)
		return NULL;

	list_for_each_entry(dev, &ffs_devices, entry) {
		if (strcmp(dev->name, name) == 0)
			return dev;
	}

	return NULL;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_get_single_dev(void)
{
	struct ffs_dev *dev;

	if (list_is_singular(&ffs_devices)) {
		dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
		if (dev->single)
			return dev;
	}

	return NULL;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_find_dev(const char *name)
{
	struct ffs_dev *dev;

	dev = _ffs_get_single_dev();
	if (dev)
		return dev;

	return _ffs_do_find_dev(name);
}

/* Configfs support *********************************************************/

static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_fs_opts,
			    func_inst.group);
}

static void ffs_attr_release(struct config_item *item)
{
	struct f_fs_opts *opts = to_ffs_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations ffs_item_ops = {
	.release	= ffs_attr_release,
};

static const struct config_item_type ffs_func_type = {
	.ct_item_ops	= &ffs_item_ops,
	.ct_owner	= THIS_MODULE,
};

/* Function registration interface ******************************************/

static void ffs_free_inst(struct usb_function_instance *f)
{
	struct f_fs_opts *opts;

	opts = to_f_fs_opts(f);
	ffs_release_dev(opts->dev);
	ffs_dev_lock();
	_ffs_free_dev(opts->dev);
	ffs_dev_unlock();
	kfree(opts);
}

static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
	if (strlen(name) >= sizeof_field(struct ffs_dev, name))
		return -ENAMETOOLONG;
	return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
}

static struct usb_function_instance *ffs_alloc_inst(void)
{
	struct f_fs_opts *opts;
	struct ffs_dev *dev;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->func_inst.set_inst_name = ffs_set_inst_name;
	opts->func_inst.free_func_inst = ffs_free_inst;
	ffs_dev_lock();
	dev = _ffs_alloc_dev();
	ffs_dev_unlock();
	if (IS_ERR(dev)) {
		kfree(opts);
		return ERR_CAST(dev);
	}
	opts->dev = dev;
	dev->opts = opts;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &ffs_func_type);
	return &opts->func_inst;
}

static void ffs_free(struct usb_function *f)
{
	kfree(ffs_func_from_usb(f));
}

static void ffs_func_unbind(struct usb_configuration *c,
			    struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	struct f_fs_opts *opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	struct ffs_ep *ep = func->eps;
	unsigned count = ffs->eps_count;
	unsigned long flags;

	ENTER();

	if (ffs->func == func) {
		ffs_func_eps_disable(func);
		ffs->func = NULL;
	}

	/* Drain any pending AIO completions */
	drain_workqueue(ffs->io_completion_wq);

	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
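
	/*
	 * opts->refcnt counts functions bound on top of this instance;
	 * functionfs_unbind() tears the ffs instance down only when the
	 * last one goes away.
	 */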
	if (!--opts->refcnt)
		functionfs_unbind(ffs);

	/* cleanup after autoconfig */
	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	while (count--) {
		if (ep->ep && ep->req)
			usb_ep_free_request(ep->ep, ep->req);
		ep->req = NULL;
		++ep;
	}
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
	kfree(func->eps);
	func->eps = NULL;

	/*
	 * eps, descriptors and interfaces_nums are allocated in the
	 * same chunk so only one free is required.
	 */
	func->function.fs_descriptors = NULL;
	func->function.hs_descriptors = NULL;
	func->function.ss_descriptors = NULL;
	func->function.ssp_descriptors = NULL;
	func->interfaces_nums = NULL;
}

static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
{
	struct ffs_function *func;

	ENTER();

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return ERR_PTR(-ENOMEM);

	func->function.name = "Function FS Gadget";
	func->function.bind = ffs_func_bind;
	func->function.unbind = ffs_func_unbind;
	func->function.set_alt = ffs_func_set_alt;
	func->function.disable = ffs_func_disable;
	func->function.setup = ffs_func_setup;
	func->function.req_match = ffs_func_req_match;
	func->function.suspend = ffs_func_suspend;
	func->function.resume = ffs_func_resume;
	func->function.free_func = ffs_free;

	return &func->function;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_alloc_dev(void)
{
	struct ffs_dev *dev;
	int ret;

	if (_ffs_get_single_dev())
		return ERR_PTR(-EBUSY);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);
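
	/*
	 * The first device registers the functionfs filesystem type via
	 * functionfs_init(); _ffs_free_dev() cleans it up again once the
	 * list becomes empty.
	 */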
	if (list_empty(&ffs_devices)) {
		ret = functionfs_init();
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}

	list_add(&dev->entry, &ffs_devices);

	return dev;
}

int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	struct ffs_dev *existing;
	int ret = 0;

	ffs_dev_lock();

	existing = _ffs_do_find_dev(name);
	if (!existing)
		strscpy(dev->name, name, ARRAY_SIZE(dev->name));
	else if (existing != dev)
		ret = -EBUSY;

	ffs_dev_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);

int ffs_single_dev(struct ffs_dev *dev)
{
	int ret;

	ret = 0;
	ffs_dev_lock();

	if (!list_is_singular(&ffs_devices))
		ret = -EBUSY;
	else
		dev->single = true;

	ffs_dev_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);

/*
 * ffs_lock must be taken by the caller of this function
 */
static void _ffs_free_dev(struct ffs_dev *dev)
{
	list_del(&dev->entry);

	kfree(dev);
	if (list_empty(&ffs_devices))
		functionfs_cleanup();
}
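
/*
 * Called when userspace mounts functionfs with this dev_name; the driver
 * that registered the device may supply ffs_acquire_dev_callback to refuse
 * the mount (treated as -ENOENT).
 */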
static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
{
	int ret = 0;
	struct ffs_dev *ffs_dev;

	ENTER();
	ffs_dev_lock();

	ffs_dev = _ffs_find_dev(dev_name);
	if (!ffs_dev) {
		ret = -ENOENT;
	} else if (ffs_dev->mounted) {
		ret = -EBUSY;
	} else if (ffs_dev->ffs_acquire_dev_callback &&
		   ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
		ret = -ENOENT;
	} else {
		ffs_dev->mounted = true;
		ffs_dev->ffs_data = ffs_data;
		ffs_data->private_data = ffs_dev;
	}

	ffs_dev_unlock();
	return ret;
}

static void ffs_release_dev(struct ffs_dev *ffs_dev)
{
	ENTER();
	ffs_dev_lock();

	if (ffs_dev && ffs_dev->mounted) {
		ffs_dev->mounted = false;

		if (ffs_dev->ffs_data) {
			ffs_dev->ffs_data->private_data = NULL;
			ffs_dev->ffs_data = NULL;
		}

		if (ffs_dev->ffs_release_dev_callback)
			ffs_dev->ffs_release_dev_callback(ffs_dev);
	}

	ffs_dev_unlock();
}
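
/*
 * ffs_ready() runs once userspace has supplied descriptors and strings on
 * ep0; ffs_closed() undoes it when the instance goes away, unregistering
 * the gadget from configfs if the function was still bound.
 */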
static int ffs_ready(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	int ret = 0;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj) {
		ret = -EINVAL;
		goto done;
	}
	if (WARN_ON(ffs_obj->desc_ready)) {
		ret = -EBUSY;
		goto done;
	}

	ffs_obj->desc_ready = true;

	if (ffs_obj->ffs_ready_callback) {
		ret = ffs_obj->ffs_ready_callback(ffs);
		if (ret)
			goto done;
	}

	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
	ffs_dev_unlock();
	return ret;
}

static void ffs_closed(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	struct f_fs_opts *opts;
	struct config_item *ci;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj)
		goto done;

	ffs_obj->desc_ready = false;

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
	    ffs_obj->ffs_closed_callback)
		ffs_obj->ffs_closed_callback(ffs);

	if (ffs_obj->opts)
		opts = ffs_obj->opts;
	else
		goto done;

	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
	    || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
		goto done;

	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
	ffs_dev_unlock();

	if (test_bit(FFS_FL_BOUND, &ffs->flags))
		unregister_gadget_item(ci);
	return;
done:
	ffs_dev_unlock();
}

/* Misc helper functions ****************************************************/
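
/*
 * Lock helper for file operations: with a non-blocking open, a contended
 * mutex yields -EAGAIN instead of sleeping.
 */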
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
{
	return nonblock
		? mutex_trylock(mutex) ? 0 : -EAGAIN
		: mutex_lock_interruptible(mutex);
}
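
/*
 * Copy a write(2) buffer from userspace: returns NULL for a zero-length
 * request, an ERR_PTR() if the copy fails, otherwise a kmalloc'ed copy
 * that the caller must kfree().
 */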
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
	char *data;

	if (!len)
		return NULL;

	data = memdup_user(buf, len);
	if (IS_ERR(data))
		return data;

	pr_vdebug("Buffer from user space:\n");
	ffs_dump_mem("", data, len);

	return data;
}

DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");