usf.c

/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/input.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/kmemleak.h>
#include <linux/mutex.h>
#include <dsp/apr_audio-v2.h>
#include "q6usm.h"
#include "usf.h"
#include "usfcdev.h"
#include "q6_init.h"

/* The driver version */
#define DRV_VERSION "1.7.1"
#define USF_VERSION_ID 0x0171

/* Standard timeout in the asynchronous ops */
#define USF_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */

/* Undefined USF device */
#define USF_UNDEF_DEV_ID 0xffff

/* TX memory mapping flag */
#define USF_VM_READ 1
/* RX memory mapping flag */
#define USF_VM_WRITE 2

/* Number of events, copied from the user space to kernel one */
#define USF_EVENTS_PORTION_SIZE 20

/* Indexes in range definitions */
#define MIN_IND 0
#define MAX_IND 1

/* The coordinates indexes */
#define X_IND 0
#define Y_IND 1
#define Z_IND 2

/* Shared memory limits */
/* max_buf_size = port_size(65535*2) * port_num(8) * group_size(3) */
#define USF_MAX_BUF_SIZE 3145680
#define USF_MAX_BUF_NUM 32

/* max size for buffer set from user space */
#define USF_MAX_USER_BUF_SIZE 100000

/* Place for operation result, received from QDSP6 */
#define APR_RESULT_IND 1

/* Place for US detection result, received from QDSP6 */
#define APR_US_DETECT_RESULT_IND 0

#define BITS_IN_BYTE 8

/* Time to stay awake after tx read event (e.g., proximity) */
#define STAY_AWAKE_AFTER_READ_MSECS 3000

/* The driver states */
enum usf_state_type {
        USF_IDLE_STATE,
        USF_OPENED_STATE,
        USF_CONFIGURED_STATE,
        USF_WORK_STATE,
        USF_ADSP_RESTART_STATE,
        USF_ERROR_STATE
};
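
/*
 * Rough state flow per direction, as driven by the ioctl handlers below:
 * IDLE -> OPENED on device open, OPENED -> CONFIGURED via US_SET_TX_INFO /
 * US_SET_RX_INFO, CONFIGURED -> WORK via US_START_TX / US_START_RX, and
 * back to OPENED when the stream is stopped (usf_disable()). ADSP_RESTART
 * and ERROR are entered from the q6usm callbacks on RESET_EVENTS or APR
 * errors.
 */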

/* The US detection status upon FW/HW based US detection results */
enum usf_us_detect_type {
        USF_US_DETECT_UNDEF,
        USF_US_DETECT_YES,
        USF_US_DETECT_NO
};

struct usf_xx_type {
        /* Name of the client - event calculator */
        char client_name[USF_MAX_CLIENT_NAME_SIZE];
        /* The driver state in TX or RX direction */
        enum usf_state_type usf_state;
        /* wait for q6 events mechanism */
        wait_queue_head_t wait;
        /* IF with q6usm info */
        struct us_client *usc;
        /* Q6:USM's encoder/decoder configuration */
        struct us_encdec_cfg encdec_cfg;
        /* Shared buffer (with Q6:USM) size */
        uint32_t buffer_size;
        /* Number of the shared buffers (with Q6:USM) */
        uint32_t buffer_count;
        /* Shared memory (cyclic buffer with 1 gap) control */
        uint32_t new_region;
        uint32_t prev_region;
        /* Q6:USM's events handler */
        void (*cb)(uint32_t, uint32_t, uint32_t *, void *);
        /* US detection result */
        enum usf_us_detect_type us_detect_type;
        /* User's update info isn't acceptable */
        u8 user_upd_info_na;
};

struct usf_type {
        /* TX device component configuration & control */
        struct usf_xx_type usf_tx;
        /* RX device component configuration & control */
        struct usf_xx_type usf_rx;
        /* Index into the opened device container */
        /* To prevent mutual usage of the same device */
        uint16_t dev_ind;
        /* Event types, supported by device */
        uint16_t event_types;
        /* The input devices are "input" module registered clients */
        struct input_dev *input_ifs[USF_MAX_EVENT_IND];
        /* Bitmap of types of events, conflicting to USF's ones */
        uint16_t conflicting_event_types;
        /* Bitmap of types of events from devs, conflicting with USF */
        uint16_t conflicting_event_filters;
        /* The requested buttons bitmap */
        uint16_t req_buttons_bitmap;
        /* Mutex for exclusive operations (all public APIs) */
        struct mutex mutex;
};

struct usf_input_dev_type {
        /* Input event type, supported by the input device */
        uint16_t event_type;
        /* Input device name */
        const char *input_dev_name;
        /* Input device registration function */
        int (*prepare_dev)(uint16_t, struct usf_type *,
                           struct us_input_info_type *,
                           const char *);
        /* Input event notification function */
        void (*notify_event)(struct usf_type *,
                             uint16_t,
                             struct usf_event_type *);
};

/* The MAX number of the supported devices */
#define MAX_DEVS_NUMBER 1

/*
 * Code for a special button that is used to show/hide a
 * hovering cursor in the input framework. Must be in
 * sync with the button code definition in the framework
 * (EventHub.h)
 */
#define BTN_USF_HOVERING_CURSOR 0x230

/* Supported buttons container */
static const int s_button_map[] = {
        BTN_STYLUS,
        BTN_STYLUS2,
        BTN_TOOL_PEN,
        BTN_TOOL_RUBBER,
        BTN_TOOL_FINGER,
        BTN_USF_HOVERING_CURSOR
};

/* The opened devices container */
static atomic_t s_opened_devs[MAX_DEVS_NUMBER];

static struct wakeup_source usf_wakeup_source;

#define USF_NAME_PREFIX "usf_"
#define USF_NAME_PREFIX_SIZE 4

static struct input_dev *allocate_dev(uint16_t ind, const char *name)
{
        struct input_dev *in_dev = input_allocate_device();

        if (in_dev == NULL) {
                pr_err("%s: input_allocate_device() failed\n", __func__);
        } else {
                /* Common part configuration */
                in_dev->name = name;
                in_dev->phys = NULL;
                in_dev->id.bustype = BUS_HOST;
                in_dev->id.vendor = 0x0001;
                in_dev->id.product = 0x0001;
                in_dev->id.version = USF_VERSION_ID;
        }
        return in_dev;
}

static int prepare_tsc_input_device(uint16_t ind,
                                    struct usf_type *usf_info,
                                    struct us_input_info_type *input_info,
                                    const char *name)
{
        int i = 0;
        int num_buttons = min(ARRAY_SIZE(s_button_map),
                              sizeof(input_info->req_buttons_bitmap) *
                              BITS_IN_BYTE);
        uint16_t max_buttons_bitmap = ((1 << ARRAY_SIZE(s_button_map)) - 1);
        struct input_dev *in_dev = allocate_dev(ind, name);

        if (in_dev == NULL)
                return -ENOMEM;

        if (input_info->req_buttons_bitmap > max_buttons_bitmap) {
                pr_err("%s: Requested buttons[%d] exceeds max buttons available[%d]\n",
                       __func__,
                       input_info->req_buttons_bitmap,
                       max_buttons_bitmap);
                input_free_device(in_dev);
                return -EINVAL;
        }

        usf_info->input_ifs[ind] = in_dev;
        usf_info->req_buttons_bitmap =
                input_info->req_buttons_bitmap;
        in_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

        for (i = 0; i < num_buttons; i++)
                if (input_info->req_buttons_bitmap & (1 << i))
                        in_dev->keybit[BIT_WORD(s_button_map[i])] |=
                                BIT_MASK(s_button_map[i]);

        input_set_abs_params(in_dev, ABS_X,
                             input_info->tsc_x_dim[MIN_IND],
                             input_info->tsc_x_dim[MAX_IND],
                             0, 0);
        input_set_abs_params(in_dev, ABS_Y,
                             input_info->tsc_y_dim[MIN_IND],
                             input_info->tsc_y_dim[MAX_IND],
                             0, 0);
        input_set_abs_params(in_dev, ABS_DISTANCE,
                             input_info->tsc_z_dim[MIN_IND],
                             input_info->tsc_z_dim[MAX_IND],
                             0, 0);
        input_set_abs_params(in_dev, ABS_PRESSURE,
                             input_info->tsc_pressure[MIN_IND],
                             input_info->tsc_pressure[MAX_IND],
                             0, 0);
        input_set_abs_params(in_dev, ABS_TILT_X,
                             input_info->tsc_x_tilt[MIN_IND],
                             input_info->tsc_x_tilt[MAX_IND],
                             0, 0);
        input_set_abs_params(in_dev, ABS_TILT_Y,
                             input_info->tsc_y_tilt[MIN_IND],
                             input_info->tsc_y_tilt[MAX_IND],
                             0, 0);
        return 0;
}

static int prepare_mouse_input_device(uint16_t ind, struct usf_type *usf_info,
                                      struct us_input_info_type *input_info,
                                      const char *name)
{
        struct input_dev *in_dev = allocate_dev(ind, name);

        if (in_dev == NULL)
                return -ENOMEM;

        usf_info->input_ifs[ind] = in_dev;
        in_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
        in_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
                                              BIT_MASK(BTN_RIGHT) |
                                              BIT_MASK(BTN_MIDDLE);
        in_dev->relbit[0] = BIT_MASK(REL_X) |
                            BIT_MASK(REL_Y) |
                            BIT_MASK(REL_Z);
        return 0;
}

static int prepare_keyboard_input_device(
                uint16_t ind,
                struct usf_type *usf_info,
                struct us_input_info_type *input_info,
                const char *name)
{
        struct input_dev *in_dev = allocate_dev(ind, name);

        if (in_dev == NULL)
                return -ENOMEM;

        usf_info->input_ifs[ind] = in_dev;
        in_dev->evbit[0] |= BIT_MASK(EV_KEY);
        /* All keys are permitted */
        memset(in_dev->keybit, 0xff, sizeof(in_dev->keybit));
        return 0;
}

static void notify_tsc_event(struct usf_type *usf_info,
                             uint16_t if_ind,
                             struct usf_event_type *event)
{
        int i = 0;
        int num_buttons = min(ARRAY_SIZE(s_button_map),
                              sizeof(usf_info->req_buttons_bitmap) *
                              BITS_IN_BYTE);
        struct input_dev *input_if = usf_info->input_ifs[if_ind];
        struct point_event_type *pe = &(event->event_data.point_event);

        input_report_abs(input_if, ABS_X, pe->coordinates[X_IND]);
        input_report_abs(input_if, ABS_Y, pe->coordinates[Y_IND]);
        input_report_abs(input_if, ABS_DISTANCE, pe->coordinates[Z_IND]);
        input_report_abs(input_if, ABS_TILT_X, pe->inclinations[X_IND]);
        input_report_abs(input_if, ABS_TILT_Y, pe->inclinations[Y_IND]);
        input_report_abs(input_if, ABS_PRESSURE, pe->pressure);
        input_report_key(input_if, BTN_TOUCH, !!(pe->pressure));

        for (i = 0; i < num_buttons; i++) {
                uint16_t mask = (1 << i),
                        btn_state = !!(pe->buttons_state_bitmap & mask);
                if (usf_info->req_buttons_bitmap & mask)
                        input_report_key(input_if, s_button_map[i], btn_state);
        }

        input_sync(input_if);

        pr_debug("%s: TSC event: xyz[%d;%d;%d], incl[%d;%d], pressure[%d], buttons[%d]\n",
                 __func__,
                 pe->coordinates[X_IND],
                 pe->coordinates[Y_IND],
                 pe->coordinates[Z_IND],
                 pe->inclinations[X_IND],
                 pe->inclinations[Y_IND],
                 pe->pressure,
                 pe->buttons_state_bitmap);
}

static void notify_mouse_event(struct usf_type *usf_info,
                               uint16_t if_ind,
                               struct usf_event_type *event)
{
        struct input_dev *input_if = usf_info->input_ifs[if_ind];
        struct mouse_event_type *me = &(event->event_data.mouse_event);

        input_report_rel(input_if, REL_X, me->rels[X_IND]);
        input_report_rel(input_if, REL_Y, me->rels[Y_IND]);
        input_report_rel(input_if, REL_Z, me->rels[Z_IND]);

        input_report_key(input_if, BTN_LEFT,
                         me->buttons_states & USF_BUTTON_LEFT_MASK);
        input_report_key(input_if, BTN_MIDDLE,
                         me->buttons_states & USF_BUTTON_MIDDLE_MASK);
        input_report_key(input_if, BTN_RIGHT,
                         me->buttons_states & USF_BUTTON_RIGHT_MASK);

        input_sync(input_if);

        pr_debug("%s: mouse event: dx[%d], dy[%d], buttons_states[%d]\n",
                 __func__, me->rels[X_IND],
                 me->rels[Y_IND], me->buttons_states);
}

static void notify_key_event(struct usf_type *usf_info,
                             uint16_t if_ind,
                             struct usf_event_type *event)
{
        struct input_dev *input_if = usf_info->input_ifs[if_ind];
        struct key_event_type *ke = &(event->event_data.key_event);

        input_report_key(input_if, ke->key, ke->key_state);
        input_sync(input_if);

        pr_debug("%s: key event: key[%d], state[%d]\n",
                 __func__,
                 ke->key,
                 ke->key_state);
}

static struct usf_input_dev_type s_usf_input_devs[] = {
        {USF_TSC_EVENT, "usf_tsc",
         prepare_tsc_input_device, notify_tsc_event},
        {USF_TSC_PTR_EVENT, "usf_tsc_ptr",
         prepare_tsc_input_device, notify_tsc_event},
        {USF_MOUSE_EVENT, "usf_mouse",
         prepare_mouse_input_device, notify_mouse_event},
        {USF_KEYBOARD_EVENT, "usf_kb",
         prepare_keyboard_input_device, notify_key_event},
        {USF_TSC_EXT_EVENT, "usf_tsc_ext",
         prepare_tsc_input_device, notify_tsc_event},
};
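
/*
 * The order of the entries above matters: register_input_device() and
 * handle_input_event() index this array directly with the event type index
 * (event_type_ind), so entry N must describe event type N.
 */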

static void usf_rx_cb(uint32_t opcode, uint32_t token,
                      uint32_t *payload, void *priv)
{
        struct usf_xx_type *usf_xx = (struct usf_xx_type *) priv;

        if (usf_xx == NULL) {
                pr_err("%s: the private data is NULL\n", __func__);
                return;
        }

        switch (opcode) {
        case Q6USM_EVENT_WRITE_DONE:
                wake_up(&usf_xx->wait);
                break;
        case RESET_EVENTS:
                pr_err("%s: received RESET_EVENTS\n", __func__);
                usf_xx->usf_state = USF_ADSP_RESTART_STATE;
                wake_up(&usf_xx->wait);
                break;
        default:
                break;
        }
}
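
/*
 * TX callback from q6usm: on READ_DONE the token appears to carry the index
 * of the shared-memory region that became ready (USM_WRONG_TOKEN marks a
 * failed read), and a short wakeup-source timeout keeps the system awake
 * long enough for user space to consume proximity-style events.
 */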

static void usf_tx_cb(uint32_t opcode, uint32_t token,
                      uint32_t *payload, void *priv)
{
        struct usf_xx_type *usf_xx = (struct usf_xx_type *) priv;

        if (usf_xx == NULL) {
                pr_err("%s: the private data is NULL\n", __func__);
                return;
        }

        switch (opcode) {
        case Q6USM_EVENT_READ_DONE:
                pr_debug("%s: acquiring %d msec wake lock\n", __func__,
                         STAY_AWAKE_AFTER_READ_MSECS);
                __pm_wakeup_event(&usf_wakeup_source,
                                  STAY_AWAKE_AFTER_READ_MSECS);
                if (token == USM_WRONG_TOKEN)
                        usf_xx->usf_state = USF_ERROR_STATE;
                usf_xx->new_region = token;
                wake_up(&usf_xx->wait);
                break;
        case Q6USM_EVENT_SIGNAL_DETECT_RESULT:
                usf_xx->us_detect_type = (payload[APR_US_DETECT_RESULT_IND]) ?
                                         USF_US_DETECT_YES :
                                         USF_US_DETECT_NO;
                wake_up(&usf_xx->wait);
                break;
        case APR_BASIC_RSP_RESULT:
                if (payload[APR_RESULT_IND]) {
                        usf_xx->usf_state = USF_ERROR_STATE;
                        usf_xx->new_region = USM_WRONG_TOKEN;
                        wake_up(&usf_xx->wait);
                }
                break;
        case RESET_EVENTS:
                pr_err("%s: received RESET_EVENTS\n", __func__);
                usf_xx->usf_state = USF_ADSP_RESTART_STATE;
                wake_up(&usf_xx->wait);
                break;
        default:
                break;
        }
}

static void release_xx(struct usf_xx_type *usf_xx)
{
        if (usf_xx != NULL) {
                if (usf_xx->usc) {
                        q6usm_us_client_free(usf_xx->usc);
                        usf_xx->usc = NULL;
                }

                if (usf_xx->encdec_cfg.params != NULL) {
                        kfree(usf_xx->encdec_cfg.params);
                        usf_xx->encdec_cfg.params = NULL;
                }
        }
}

static void usf_disable(struct usf_xx_type *usf_xx)
{
        if (usf_xx != NULL) {
                if ((usf_xx->usf_state != USF_IDLE_STATE) &&
                    (usf_xx->usf_state != USF_OPENED_STATE)) {
                        (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
                        usf_xx->usf_state = USF_OPENED_STATE;
                        wake_up(&usf_xx->wait);
                }
                release_xx(usf_xx);
        }
}
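
/*
 * config_xx() is shared by the TX and RX setup paths: it validates the
 * requested buffer geometry against USF_MAX_BUF_SIZE/USF_MAX_BUF_NUM,
 * copies the client name and the optional transparent parameter blob from
 * user space, fills the common encoder/decoder configuration and, finally,
 * allocates the q6usm client with the direction-specific callback.
 */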

static int config_xx(struct usf_xx_type *usf_xx, struct us_xx_info_type *config)
{
        int rc = 0;
        uint16_t data_map_size = 0;
        uint16_t min_map_size = 0;

        if ((usf_xx == NULL) ||
            (config == NULL))
                return -EINVAL;

        if ((config->buf_size == 0) ||
            (config->buf_size > USF_MAX_BUF_SIZE) ||
            (config->buf_num == 0) ||
            (config->buf_num > USF_MAX_BUF_NUM)) {
                pr_err("%s: wrong params: buf_size=%d; buf_num=%d\n",
                       __func__, config->buf_size, config->buf_num);
                return -EINVAL;
        }

        data_map_size = sizeof(usf_xx->encdec_cfg.cfg_common.data_map);
        min_map_size = min(data_map_size, config->port_cnt);

        if (config->client_name != NULL) {
                if (strncpy_from_user(usf_xx->client_name,
                                      (char __user *)config->client_name,
                                      sizeof(usf_xx->client_name) - 1) < 0) {
                        pr_err("%s: get client name failed\n", __func__);
                        return -EINVAL;
                }
        }

        pr_debug("%s: name=%s; buf_size:%d; dev_id:0x%x; sample_rate:%d\n",
                 __func__, usf_xx->client_name, config->buf_size,
                 config->dev_id, config->sample_rate);
        pr_debug("%s: buf_num:%d; format:%d; port_cnt:%d; data_size=%d\n",
                 __func__, config->buf_num, config->stream_format,
                 config->port_cnt, config->params_data_size);
        pr_debug("%s: id[0]=%d, id[1]=%d, id[2]=%d, id[3]=%d, id[4]=%d,\n",
                 __func__,
                 config->port_id[0],
                 config->port_id[1],
                 config->port_id[2],
                 config->port_id[3],
                 config->port_id[4]);
        pr_debug("id[5]=%d, id[6]=%d, id[7]=%d\n",
                 config->port_id[5],
                 config->port_id[6],
                 config->port_id[7]);

        /* q6usm allocation & configuration */
        usf_xx->buffer_size = config->buf_size;
        usf_xx->buffer_count = config->buf_num;
        usf_xx->encdec_cfg.cfg_common.bits_per_sample =
                config->bits_per_sample;
        usf_xx->encdec_cfg.cfg_common.sample_rate = config->sample_rate;
        /* AFE port e.g. AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX */
        usf_xx->encdec_cfg.cfg_common.dev_id = config->dev_id;

        usf_xx->encdec_cfg.cfg_common.ch_cfg = config->port_cnt;
        memcpy((void *)&usf_xx->encdec_cfg.cfg_common.data_map,
               (void *)config->port_id,
               min_map_size);

        usf_xx->encdec_cfg.format_id = config->stream_format;
        usf_xx->encdec_cfg.params_size = config->params_data_size;
        usf_xx->user_upd_info_na = 1; /* it's used in US_GET_TX_UPDATE */

        if (config->params_data_size > 0) { /* transparent data copy */
                usf_xx->encdec_cfg.params = kzalloc(config->params_data_size,
                                                    GFP_KERNEL);
                /* False memory leak here - pointer in packed struct
                 * is undetected by kmemleak tool
                 */
                kmemleak_ignore(usf_xx->encdec_cfg.params);
                if (usf_xx->encdec_cfg.params == NULL) {
                        pr_err("%s: params memory alloc[%d] failure\n",
                               __func__,
                               config->params_data_size);
                        return -ENOMEM;
                }
                rc = copy_from_user(usf_xx->encdec_cfg.params,
                                    (uint8_t __user *)config->params_data,
                                    config->params_data_size);
                if (rc) {
                        pr_err("%s: transparent data copy failure\n",
                               __func__);
                        kfree(usf_xx->encdec_cfg.params);
                        usf_xx->encdec_cfg.params = NULL;
                        return -EFAULT;
                }
                pr_debug("%s: params_size[%d]; params[%d,%d,%d,%d, %d]\n",
                         __func__,
                         config->params_data_size,
                         usf_xx->encdec_cfg.params[0],
                         usf_xx->encdec_cfg.params[1],
                         usf_xx->encdec_cfg.params[2],
                         usf_xx->encdec_cfg.params[3],
                         usf_xx->encdec_cfg.params[4]);
        }

        usf_xx->usc = q6usm_us_client_alloc(usf_xx->cb, (void *)usf_xx);
        if (!usf_xx->usc) {
                pr_err("%s: Could not allocate q6usm client\n", __func__);
                rc = -EFAULT;
        }

        return rc;
}

static bool usf_match(uint16_t event_type_ind, struct input_dev *dev)
{
        bool rc = false;

        rc = (event_type_ind < MAX_EVENT_TYPE_NUM) &&
             ((dev->name == NULL) ||
              strncmp(dev->name, USF_NAME_PREFIX, USF_NAME_PREFIX_SIZE));
        pr_debug("%s: name=[%s]; rc=%d\n",
                 __func__, dev->name, rc);
        return rc;
}

static bool usf_register_conflicting_events(uint16_t event_types)
{
        bool rc = true;
        uint16_t ind = 0;
        uint16_t mask = 1;

        for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) {
                if (event_types & mask) {
                        rc = usfcdev_register(ind, usf_match);
                        if (!rc)
                                break;
                }
                mask = mask << 1;
        }

        return rc;
}

static void usf_unregister_conflicting_events(uint16_t event_types)
{
        uint16_t ind = 0;
        uint16_t mask = 1;

        for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) {
                if (event_types & mask)
                        usfcdev_unregister(ind);
                mask = mask << 1;
        }
}

static void usf_set_event_filters(struct usf_type *usf, uint16_t event_filters)
{
        uint16_t ind = 0;
        uint16_t mask = 1;

        if (usf->conflicting_event_filters != event_filters) {
                for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) {
                        if (usf->conflicting_event_types & mask)
                                usfcdev_set_filter(ind, event_filters & mask);
                        mask = mask << 1;
                }
                usf->conflicting_event_filters = event_filters;
        }
}

static int register_input_device(struct usf_type *usf_info,
                                 struct us_input_info_type *input_info)
{
        int rc = 0;
        bool ret = true;
        uint16_t ind = 0;

        if ((usf_info == NULL) ||
            (input_info == NULL) ||
            !(input_info->event_types & USF_ALL_EVENTS)) {
                pr_err("%s: wrong input parameter(s)\n", __func__);
                return -EINVAL;
        }

        for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
                if (usf_info->input_ifs[ind] != NULL) {
                        pr_err("%s: input_if[%d] is already allocated\n",
                               __func__, ind);
                        return -EFAULT;
                }
                if ((input_info->event_types &
                     s_usf_input_devs[ind].event_type) &&
                    s_usf_input_devs[ind].prepare_dev) {
                        rc = (*s_usf_input_devs[ind].prepare_dev)(
                                ind,
                                usf_info,
                                input_info,
                                s_usf_input_devs[ind].input_dev_name);
                        if (rc)
                                return rc;
                        rc = input_register_device(usf_info->input_ifs[ind]);
                        if (rc) {
                                pr_err("%s: input_reg_dev() failed; rc=%d\n",
                                       __func__, rc);
                                input_free_device(usf_info->input_ifs[ind]);
                                usf_info->input_ifs[ind] = NULL;
                        } else {
                                usf_info->event_types |=
                                        s_usf_input_devs[ind].event_type;
                                pr_debug("%s: input device[%s] was registered\n",
                                         __func__,
                                         s_usf_input_devs[ind].input_dev_name);
                        }
                } /* supported event */
        } /* event types loop */

        ret = usf_register_conflicting_events(
                input_info->conflicting_event_types);
        if (ret)
                usf_info->conflicting_event_types =
                        input_info->conflicting_event_types;

        return 0;
}

static void handle_input_event(struct usf_type *usf_info,
                               uint16_t event_counter,
                               struct usf_event_type __user *event)
{
        uint16_t ind = 0;
        uint16_t events_num = 0;
        struct usf_event_type usf_events[USF_EVENTS_PORTION_SIZE];
        int rc = 0;

        if ((usf_info == NULL) ||
            (event == NULL) || (!event_counter)) {
                return;
        }

        while (event_counter > 0) {
                if (event_counter > USF_EVENTS_PORTION_SIZE) {
                        events_num = USF_EVENTS_PORTION_SIZE;
                        event_counter -= USF_EVENTS_PORTION_SIZE;
                } else {
                        events_num = event_counter;
                        event_counter = 0;
                }
                rc = copy_from_user(usf_events,
                                    (struct usf_event_type __user *)event,
                                    events_num * sizeof(struct usf_event_type));
                if (rc) {
                        pr_err("%s: copy upd_rx_info from user; rc=%d\n",
                               __func__, rc);
                        return;
                }
                for (ind = 0; ind < events_num; ++ind) {
                        struct usf_event_type *p_event = &usf_events[ind];
                        uint16_t if_ind = p_event->event_type_ind;

                        if ((if_ind >= USF_MAX_EVENT_IND) ||
                            (usf_info->input_ifs[if_ind] == NULL))
                                continue; /* event isn't supported */
                        if (s_usf_input_devs[if_ind].notify_event)
                                (*s_usf_input_devs[if_ind].notify_event)(
                                        usf_info,
                                        if_ind,
                                        p_event);
                } /* loop in the portion */
        } /* all events loop */
}

static int usf_start_tx(struct usf_xx_type *usf_xx)
{
        int rc = q6usm_run(usf_xx->usc, 0, 0, 0);

        pr_debug("%s: tx: q6usm_run; rc=%d\n", __func__, rc);
        if (!rc) {
                if (usf_xx->buffer_count >= USM_MIN_BUF_CNT) {
                        /* supply all buffers */
                        rc = q6usm_read(usf_xx->usc,
                                        usf_xx->buffer_count);
                        pr_debug("%s: q6usm_read[%d]\n",
                                 __func__, rc);
                        if (rc)
                                pr_err("%s: buf read failed",
                                       __func__);
                        else
                                usf_xx->usf_state = USF_WORK_STATE;
                } else
                        usf_xx->usf_state = USF_WORK_STATE;
        }

        return rc;
} /* usf_start_tx */
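
/*
 * Starting TX primes q6usm with the whole buffer ring (q6usm_read() for
 * buffer_count regions) so the DSP has somewhere to write before the first
 * US_GET_TX_UPDATE arrives; starting RX below only moves the state machine.
 */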

static int usf_start_rx(struct usf_xx_type *usf_xx)
{
        int rc = q6usm_run(usf_xx->usc, 0, 0, 0);

        pr_debug("%s: rx: q6usm_run; rc=%d\n",
                 __func__, rc);
        if (!rc)
                usf_xx->usf_state = USF_WORK_STATE;

        return rc;
} /* usf_start_rx */

static int __usf_set_us_detection(struct usf_type *usf,
                                  struct us_detect_info_type *detect_info)
{
        uint32_t timeout = 0;
        struct usm_session_cmd_detect_info *p_allocated_memory = NULL;
        struct usm_session_cmd_detect_info usm_detect_info;
        struct usm_session_cmd_detect_info *p_usm_detect_info =
                &usm_detect_info;
        uint32_t detect_info_size = sizeof(struct usm_session_cmd_detect_info);
        struct usf_xx_type *usf_xx = &usf->usf_tx;
        int rc = 0;

        if (detect_info->us_detector != US_DETECT_FW) {
                pr_err("%s: unsupported detector: %d\n",
                       __func__, detect_info->us_detector);
                return -EINVAL;
        }

        if ((detect_info->params_data_size != 0) &&
            (detect_info->params_data != NULL)) {
                uint8_t *p_data = NULL;

                detect_info_size += detect_info->params_data_size;
                p_allocated_memory = kzalloc(detect_info_size, GFP_KERNEL);
                if (p_allocated_memory == NULL) {
                        pr_err("%s: detect_info[%d] allocation failed\n",
                               __func__, detect_info_size);
                        return -ENOMEM;
                }
                p_usm_detect_info = p_allocated_memory;
                p_data = (uint8_t *)p_usm_detect_info +
                         sizeof(struct usm_session_cmd_detect_info);

                rc = copy_from_user(p_data,
                                    (uint8_t __user *)(detect_info->params_data),
                                    detect_info->params_data_size);
                if (rc) {
                        pr_err("%s: copy params from user; rc=%d\n",
                               __func__, rc);
                        kfree(p_allocated_memory);
                        return -EFAULT;
                }
                p_usm_detect_info->algorithm_cfg_size =
                        detect_info->params_data_size;
        } else
                usm_detect_info.algorithm_cfg_size = 0;

        p_usm_detect_info->detect_mode = detect_info->us_detect_mode;
        p_usm_detect_info->skip_interval = detect_info->skip_time;

        usf_xx->us_detect_type = USF_US_DETECT_UNDEF;

        rc = q6usm_set_us_detection(usf_xx->usc,
                                    p_usm_detect_info,
                                    detect_info_size);
        if (rc || (detect_info->detect_timeout == USF_NO_WAIT_TIMEOUT)) {
                kfree(p_allocated_memory);
                return rc;
        }

        /* Get US detection result */
        if (detect_info->detect_timeout == USF_INFINITIVE_TIMEOUT) {
                rc = wait_event_interruptible(usf_xx->wait,
                                              (usf_xx->us_detect_type !=
                                               USF_US_DETECT_UNDEF) ||
                                              (usf_xx->usf_state ==
                                               USF_ADSP_RESTART_STATE));
        } else {
                if (detect_info->detect_timeout == USF_DEFAULT_TIMEOUT)
                        timeout = USF_TIMEOUT_JIFFIES;
                else
                        timeout = detect_info->detect_timeout * HZ;
        }
        rc = wait_event_interruptible_timeout(usf_xx->wait,
                                              (usf_xx->us_detect_type !=
                                               USF_US_DETECT_UNDEF) ||
                                              (usf_xx->usf_state ==
                                               USF_ADSP_RESTART_STATE), timeout);
        /* In the case of aDSP restart, "no US" is assumed */
        if (usf_xx->usf_state == USF_ADSP_RESTART_STATE)
                rc = -EFAULT;
        /* In the case of timeout, "no US" is assumed */
        if (rc < 0)
                pr_err("%s: Getting US detection failed rc[%d]\n",
                       __func__, rc);
        else {
                usf->usf_rx.us_detect_type = usf->usf_tx.us_detect_type;
                detect_info->is_us =
                        (usf_xx->us_detect_type == USF_US_DETECT_YES);
        }

        kfree(p_allocated_memory);

        return rc;
} /* __usf_set_us_detection */

static int usf_set_us_detection(struct usf_type *usf, unsigned long arg)
{
        struct us_detect_info_type detect_info;
        int rc = copy_from_user(&detect_info,
                                (struct us_detect_info_type __user *) arg,
                                sizeof(detect_info));

        if (rc) {
                pr_err("%s: copy detect_info from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        if (detect_info.params_data_size > USF_MAX_USER_BUF_SIZE) {
                pr_err("%s: user buffer size exceeds maximum\n",
                       __func__);
                return -EFAULT;
        }

        rc = __usf_set_us_detection(usf, &detect_info);
        if (rc < 0) {
                pr_err("%s: set us detection failed; rc=%d\n",
                       __func__, rc);
                return rc;
        }

        rc = copy_to_user((void __user *)arg,
                          &detect_info,
                          sizeof(detect_info));
        if (rc) {
                pr_err("%s: copy detect_info to user; rc=%d\n",
                       __func__, rc);
                rc = -EFAULT;
        }

        return rc;
} /* usf_set_us_detection */

static int __usf_set_tx_info(struct usf_type *usf,
                             struct us_tx_info_type *config_tx)
{
        struct usf_xx_type *usf_xx = &usf->usf_tx;
        int rc = 0;

        usf_xx->new_region = USM_UNDEF_TOKEN;
        usf_xx->prev_region = USM_UNDEF_TOKEN;
        usf_xx->cb = usf_tx_cb;

        init_waitqueue_head(&usf_xx->wait);

        if (config_tx->us_xx_info.client_name != NULL) {
                int res = strncpy_from_user(
                        usf_xx->client_name,
                        (char __user *)(config_tx->us_xx_info.client_name),
                        sizeof(usf_xx->client_name) - 1);
                if (res < 0) {
                        pr_err("%s: get client name failed\n",
                               __func__);
                        return -EINVAL;
                }
        }

        rc = config_xx(usf_xx, &(config_tx->us_xx_info));
        if (rc)
                return rc;

        rc = q6usm_open_read(usf_xx->usc,
                             usf_xx->encdec_cfg.format_id);
        if (rc)
                return rc;

        rc = q6usm_us_client_buf_alloc(OUT, usf_xx->usc,
                                       usf_xx->buffer_size,
                                       usf_xx->buffer_count);
        if (rc) {
                (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
                return rc;
        }

        rc = q6usm_us_param_buf_alloc(OUT, usf_xx->usc,
                        config_tx->us_xx_info.max_get_set_param_buf_size);
        if (rc) {
                (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
                return rc;
        }

        rc = q6usm_enc_cfg_blk(usf_xx->usc,
                               &usf_xx->encdec_cfg);
        if (!rc &&
            (config_tx->input_info.event_types != USF_NO_EVENT)) {
                rc = register_input_device(usf,
                                           &(config_tx->input_info));
        }

        if (rc)
                (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
        else
                usf_xx->usf_state = USF_CONFIGURED_STATE;

        return rc;
} /* __usf_set_tx_info */

static int usf_set_tx_info(struct usf_type *usf, unsigned long arg)
{
        struct us_tx_info_type config_tx;
        int rc = copy_from_user(&config_tx,
                                (struct us_tx_info_type __user *) arg,
                                sizeof(config_tx));

        if (rc) {
                pr_err("%s: copy config_tx from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        if (config_tx.us_xx_info.params_data_size > USF_MAX_USER_BUF_SIZE) {
                pr_err("%s: user buffer size exceeds maximum\n",
                       __func__);
                return -EFAULT;
        }

        return __usf_set_tx_info(usf, &config_tx);
} /* usf_set_tx_info */

static int __usf_set_rx_info(struct usf_type *usf,
                             struct us_rx_info_type *config_rx)
{
        struct usf_xx_type *usf_xx = &usf->usf_rx;
        int rc = 0;

        usf_xx->new_region = USM_UNDEF_TOKEN;
        usf_xx->prev_region = USM_UNDEF_TOKEN;
        usf_xx->cb = usf_rx_cb;

        rc = config_xx(usf_xx, &(config_rx->us_xx_info));
        if (rc)
                return rc;

        rc = q6usm_open_write(usf_xx->usc,
                              usf_xx->encdec_cfg.format_id);
        if (rc)
                return rc;

        rc = q6usm_us_client_buf_alloc(
                IN,
                usf_xx->usc,
                usf_xx->buffer_size,
                usf_xx->buffer_count);
        if (rc) {
                (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
                return rc;
        }

        rc = q6usm_us_param_buf_alloc(IN, usf_xx->usc,
                        config_rx->us_xx_info.max_get_set_param_buf_size);
        if (rc) {
                (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
                return rc;
        }

        rc = q6usm_dec_cfg_blk(usf_xx->usc,
                               &usf_xx->encdec_cfg);
        if (rc)
                (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
        else {
                init_waitqueue_head(&usf_xx->wait);
                usf_xx->usf_state = USF_CONFIGURED_STATE;
        }

        return rc;
} /* __usf_set_rx_info */

static int usf_set_rx_info(struct usf_type *usf, unsigned long arg)
{
        struct us_rx_info_type config_rx;
        int rc = copy_from_user(&config_rx,
                                (struct us_rx_info_type __user *) arg,
                                sizeof(config_rx));

        if (rc) {
                pr_err("%s: copy config_rx from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        if (config_rx.us_xx_info.params_data_size > USF_MAX_USER_BUF_SIZE) {
                pr_err("%s: user buffer size exceeds maximum\n",
                       __func__);
                return -EFAULT;
        }

        return __usf_set_rx_info(usf, &config_rx);
} /* usf_set_rx_info */
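
/*
 * The TX path treats the shared buffers as a cyclic ring with one gap:
 * prev_region is the last region handed to user space, new_region is the
 * latest token reported by the READ_DONE callback. __usf_get_tx_update()
 * waits until they differ (or the state leaves WORK), releases the region
 * the caller has finished with via q6usm_read(), and returns the new one.
 */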

static int __usf_get_tx_update(struct usf_type *usf,
                               struct us_tx_update_info_type *upd_tx_info)
{
        unsigned long prev_jiffies = 0;
        uint32_t timeout = 0;
        struct usf_xx_type *usf_xx = &usf->usf_tx;
        int rc = 0;

        if (!usf_xx->user_upd_info_na) {
                usf_set_event_filters(usf, upd_tx_info->event_filters);
                handle_input_event(usf,
                                   upd_tx_info->event_counter,
                                   upd_tx_info->event);

                /* Release available regions */
                rc = q6usm_read(usf_xx->usc,
                                upd_tx_info->free_region);
                if (rc)
                        return rc;
        } else
                usf_xx->user_upd_info_na = 0;

        /* Get data ready regions */
        if (upd_tx_info->timeout == USF_INFINITIVE_TIMEOUT) {
                rc = wait_event_interruptible(usf_xx->wait,
                                              (usf_xx->prev_region !=
                                               usf_xx->new_region) ||
                                              (usf_xx->usf_state !=
                                               USF_WORK_STATE));
        } else {
                if (upd_tx_info->timeout == USF_NO_WAIT_TIMEOUT)
                        rc = (usf_xx->prev_region != usf_xx->new_region);
                else {
                        prev_jiffies = jiffies;
                        if (upd_tx_info->timeout == USF_DEFAULT_TIMEOUT) {
                                timeout = USF_TIMEOUT_JIFFIES;
                                rc = wait_event_timeout(
                                        usf_xx->wait,
                                        (usf_xx->prev_region !=
                                         usf_xx->new_region) ||
                                        (usf_xx->usf_state !=
                                         USF_WORK_STATE),
                                        timeout);
                        } else {
                                timeout = upd_tx_info->timeout * HZ;
                                rc = wait_event_interruptible_timeout(
                                        usf_xx->wait,
                                        (usf_xx->prev_region !=
                                         usf_xx->new_region) ||
                                        (usf_xx->usf_state !=
                                         USF_WORK_STATE),
                                        timeout);
                        }
                }
                if (!rc) {
                        pr_debug("%s: timeout. prev_j=%lu; j=%lu\n",
                                 __func__, prev_jiffies, jiffies);
                        pr_debug("%s: timeout. prev=%d; new=%d\n",
                                 __func__, usf_xx->prev_region,
                                 usf_xx->new_region);
                        pr_debug("%s: timeout. free_region=%d;\n",
                                 __func__, upd_tx_info->free_region);
                        if (usf_xx->prev_region ==
                            usf_xx->new_region) {
                                pr_err("%s:read data: timeout\n",
                                       __func__);
                                return -ETIME;
                        }
                }
        }

        if ((usf_xx->usf_state != USF_WORK_STATE) ||
            (rc == -ERESTARTSYS)) {
                pr_err("%s: Get ready region failure; state[%d]; rc[%d]\n",
                       __func__, usf_xx->usf_state, rc);
                return -EINTR;
        }

        upd_tx_info->ready_region = usf_xx->new_region;
        usf_xx->prev_region = upd_tx_info->ready_region;

        if (upd_tx_info->ready_region == USM_WRONG_TOKEN) {
                pr_err("%s: TX path corrupted; prev=%d\n",
                       __func__, usf_xx->prev_region);
                return -EIO;
        }

        return rc;
} /* __usf_get_tx_update */

static int usf_get_tx_update(struct usf_type *usf, unsigned long arg)
{
        struct us_tx_update_info_type upd_tx_info;
        int rc = copy_from_user(&upd_tx_info,
                                (struct us_tx_update_info_type __user *) arg,
                                sizeof(upd_tx_info));

        if (rc) {
                pr_err("%s: copy upd_tx_info from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        rc = __usf_get_tx_update(usf, &upd_tx_info);
        if (rc < 0) {
                pr_err("%s: get tx update failed; rc=%d\n",
                       __func__, rc);
                return rc;
        }

        rc = copy_to_user((void __user *)arg,
                          &upd_tx_info,
                          sizeof(upd_tx_info));
        if (rc) {
                pr_err("%s: copy upd_tx_info to user; rc=%d\n",
                       __func__, rc);
                rc = -EFAULT;
        }

        return rc;
} /* usf_get_tx_update */

static int __usf_set_rx_update(struct usf_xx_type *usf_xx,
                               struct us_rx_update_info_type *upd_rx_info)
{
        int rc = 0;

        /* Send available data regions */
        if (upd_rx_info->ready_region !=
            usf_xx->buffer_count) {
                rc = q6usm_write(
                        usf_xx->usc,
                        upd_rx_info->ready_region);
                if (rc)
                        return rc;
        }

        /* Get free regions */
        rc = wait_event_timeout(
                usf_xx->wait,
                !q6usm_is_write_buf_full(
                        usf_xx->usc,
                        &(upd_rx_info->free_region)) ||
                (usf_xx->usf_state == USF_IDLE_STATE),
                USF_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s:timeout. wait for write buf not full\n",
                       __func__);
        } else {
                if (usf_xx->usf_state != USF_WORK_STATE) {
                        pr_err("%s: RX: state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        rc = -EINTR;
                }
        }

        return rc;
} /* __usf_set_rx_update */

static int usf_set_rx_update(struct usf_xx_type *usf_xx, unsigned long arg)
{
        struct us_rx_update_info_type upd_rx_info;
        int rc = copy_from_user(&upd_rx_info,
                                (struct us_rx_update_info_type __user *) arg,
                                sizeof(upd_rx_info));

        if (rc) {
                pr_err("%s: copy upd_rx_info from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        rc = __usf_set_rx_update(usf_xx, &upd_rx_info);
        if (rc < 0) {
                pr_err("%s: set rx update failed; rc=%d\n",
                       __func__, rc);
                return rc;
        }

        rc = copy_to_user((void __user *)arg,
                          &upd_rx_info,
                          sizeof(upd_rx_info));
        if (rc) {
                pr_err("%s: copy rx_info to user; rc=%d\n",
                       __func__, rc);
                rc = -EFAULT;
        }

        return rc;
} /* usf_set_rx_update */

static void usf_release_input(struct usf_type *usf)
{
        uint16_t ind = 0;

        usf_unregister_conflicting_events(
                usf->conflicting_event_types);
        usf->conflicting_event_types = 0;

        for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
                if (usf->input_ifs[ind] == NULL)
                        continue;
                input_unregister_device(usf->input_ifs[ind]);
                usf->input_ifs[ind] = NULL;
                pr_debug("%s input_unregister_device[%s]\n",
                         __func__,
                         s_usf_input_devs[ind].input_dev_name);
        }
} /* usf_release_input */

static int usf_stop_tx(struct usf_type *usf)
{
        struct usf_xx_type *usf_xx = &usf->usf_tx;

        usf_release_input(usf);
        usf_disable(usf_xx);

        return 0;
} /* usf_stop_tx */

static int __usf_get_version(struct us_version_info_type *version_info)
{
        int rc = 0;

        if (version_info->buf_size < sizeof(DRV_VERSION)) {
                pr_err("%s: buf_size (%d) < version string size (%zu)\n",
                       __func__, version_info->buf_size, sizeof(DRV_VERSION));
                return -EINVAL;
        }

        rc = copy_to_user((void __user *)(version_info->pbuf),
                          DRV_VERSION,
                          sizeof(DRV_VERSION));
        if (rc) {
                pr_err("%s: copy to version_info.pbuf; rc=%d\n",
                       __func__, rc);
                rc = -EFAULT;
        }

        return rc;
} /* __usf_get_version */

static int usf_get_version(unsigned long arg)
{
        struct us_version_info_type version_info;
        int rc = copy_from_user(&version_info,
                                (struct us_version_info_type __user *) arg,
                                sizeof(version_info));

        if (rc) {
                pr_err("%s: copy version_info from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        rc = __usf_get_version(&version_info);
        if (rc < 0) {
                pr_err("%s: get version failed; rc=%d\n",
                       __func__, rc);
                return rc;
        }

        rc = copy_to_user((void __user *)arg,
                          &version_info,
                          sizeof(version_info));
        if (rc) {
                pr_err("%s: copy version_info to user; rc=%d\n",
                       __func__, rc);
                rc = -EFAULT;
        }

        return rc;
} /* usf_get_version */

static int __usf_set_stream_param(struct usf_xx_type *usf_xx,
                                  struct us_stream_param_type *set_stream_param,
                                  int dir)
{
        struct us_client *usc = usf_xx->usc;
        struct us_port_data *port;
        int rc = 0;

        if (usc == NULL) {
                pr_err("%s: usc is null\n",
                       __func__);
                return -EFAULT;
        }

        port = &usc->port[dir];
        if (port == NULL) {
                pr_err("%s: port is null\n",
                       __func__);
                return -EFAULT;
        }

        if (port->param_buf == NULL) {
                pr_err("%s: parameter buffer is null\n",
                       __func__);
                return -EFAULT;
        }

        if (set_stream_param->buf_size > port->param_buf_size) {
                pr_err("%s: buf_size (%d) > maximum buf size (%d)\n",
                       __func__, set_stream_param->buf_size,
                       port->param_buf_size);
                return -EINVAL;
        }

        if (set_stream_param->buf_size == 0) {
                pr_err("%s: buf_size is 0\n", __func__);
                return -EINVAL;
        }

        rc = copy_from_user(port->param_buf,
                            (uint8_t __user *) set_stream_param->pbuf,
                            set_stream_param->buf_size);
        if (rc) {
                pr_err("%s: copy param buf from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        rc = q6usm_set_us_stream_param(dir, usc, set_stream_param->module_id,
                                       set_stream_param->param_id,
                                       set_stream_param->buf_size);
        if (rc) {
                pr_err("%s: q6usm_set_us_stream_param failed; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        return rc;
}

static int usf_set_stream_param(struct usf_xx_type *usf_xx,
                                unsigned long arg, int dir)
{
        struct us_stream_param_type set_stream_param;
        int rc = 0;

        rc = copy_from_user(&set_stream_param,
                            (struct us_stream_param_type __user *) arg,
                            sizeof(set_stream_param));
        if (rc) {
                pr_err("%s: copy set_stream_param from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        return __usf_set_stream_param(usf_xx, &set_stream_param, dir);
} /* usf_set_stream_param */

static int __usf_get_stream_param(struct usf_xx_type *usf_xx,
                                  struct us_stream_param_type *get_stream_param,
                                  int dir)
{
        struct us_client *usc = usf_xx->usc;
        struct us_port_data *port;
        int rc = 0;

        if (usc == NULL) {
                pr_err("%s: us_client is null\n",
                       __func__);
                return -EFAULT;
        }

        port = &usc->port[dir];
        if (port->param_buf == NULL) {
                pr_err("%s: parameter buffer is null\n",
                       __func__);
                return -EFAULT;
        }

        if (get_stream_param->buf_size > port->param_buf_size) {
                pr_err("%s: buf_size (%d) > maximum buf size (%d)\n",
                       __func__, get_stream_param->buf_size,
                       port->param_buf_size);
                return -EINVAL;
        }

        if (get_stream_param->buf_size == 0) {
                pr_err("%s: buf_size is 0\n", __func__);
                return -EINVAL;
        }

        rc = q6usm_get_us_stream_param(dir, usc, get_stream_param->module_id,
                                       get_stream_param->param_id,
                                       get_stream_param->buf_size);
        if (rc) {
                pr_err("%s: q6usm_get_us_stream_param failed; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        rc = copy_to_user((uint8_t __user *) get_stream_param->pbuf,
                          port->param_buf,
                          get_stream_param->buf_size);
        if (rc) {
                pr_err("%s: copy param buf to user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        return rc;
}

static int usf_get_stream_param(struct usf_xx_type *usf_xx,
                                unsigned long arg, int dir)
{
        struct us_stream_param_type get_stream_param;
        int rc = 0;

        rc = copy_from_user(&get_stream_param,
                            (struct us_stream_param_type __user *) arg,
                            sizeof(get_stream_param));
        if (rc) {
                pr_err("%s: copy get_stream_param from user; rc=%d\n",
                       __func__, rc);
                return -EFAULT;
        }

        return __usf_get_stream_param(usf_xx, &get_stream_param, dir);
} /* usf_get_stream_param */

static long __usf_ioctl(struct usf_type *usf,
                        unsigned int cmd,
                        unsigned long arg)
{
        int rc = 0;
        struct usf_xx_type *usf_xx = NULL;

        switch (cmd) {
        case US_START_TX: {
                usf_xx = &usf->usf_tx;
                if (usf_xx->usf_state == USF_CONFIGURED_STATE)
                        rc = usf_start_tx(usf_xx);
                else {
                        pr_err("%s: start_tx: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        return -EBADFD;
                }
                break;
        }

        case US_START_RX: {
                usf_xx = &usf->usf_rx;
                if (usf_xx->usf_state == USF_CONFIGURED_STATE)
                        rc = usf_start_rx(usf_xx);
                else {
                        pr_err("%s: start_rx: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        return -EBADFD;
                }
                break;
        }

        case US_SET_TX_INFO: {
                usf_xx = &usf->usf_tx;
                if (usf_xx->usf_state == USF_OPENED_STATE)
                        rc = usf_set_tx_info(usf, arg);
                else {
                        pr_err("%s: set_tx_info: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        return -EBADFD;
                }
                break;
        } /* US_SET_TX_INFO */

        case US_SET_RX_INFO: {
                usf_xx = &usf->usf_rx;
                if (usf_xx->usf_state == USF_OPENED_STATE)
                        rc = usf_set_rx_info(usf, arg);
                else {
                        pr_err("%s: set_rx_info: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        return -EBADFD;
                }
                break;
        } /* US_SET_RX_INFO */

        case US_GET_TX_UPDATE: {
                struct usf_xx_type *usf_xx = &usf->usf_tx;

                if (usf_xx->usf_state == USF_WORK_STATE)
                        rc = usf_get_tx_update(usf, arg);
                else {
                        pr_err("%s: get_tx_update: wrong state[%d]\n", __func__,
                               usf_xx->usf_state);
                        rc = -EBADFD;
                }
                break;
        } /* US_GET_TX_UPDATE */

        case US_SET_RX_UPDATE: {
                struct usf_xx_type *usf_xx = &usf->usf_rx;

                if (usf_xx->usf_state == USF_WORK_STATE)
                        rc = usf_set_rx_update(usf_xx, arg);
                else {
                        pr_err("%s: set_rx_update: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        rc = -EBADFD;
                }
                break;
        } /* US_SET_RX_UPDATE */

        case US_STOP_TX: {
                usf_xx = &usf->usf_tx;
                if ((usf_xx->usf_state == USF_WORK_STATE)
                    || (usf_xx->usf_state == USF_ADSP_RESTART_STATE))
                        rc = usf_stop_tx(usf);
                else {
                        pr_err("%s: stop_tx: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        return -EBADFD;
                }
                break;
        } /* US_STOP_TX */

        case US_STOP_RX: {
                usf_xx = &usf->usf_rx;
                if ((usf_xx->usf_state == USF_WORK_STATE)
                    || (usf_xx->usf_state == USF_ADSP_RESTART_STATE))
                        usf_disable(usf_xx);
                else {
                        pr_err("%s: stop_rx: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        return -EBADFD;
                }
                break;
        } /* US_STOP_RX */

        case US_SET_DETECTION: {
                struct usf_xx_type *usf_xx = &usf->usf_tx;

                if (usf_xx->usf_state == USF_WORK_STATE)
                        rc = usf_set_us_detection(usf, arg);
                else {
                        pr_err("%s: set us detection: wrong state[%d]\n",
                               __func__,
                               usf_xx->usf_state);
                        rc = -EBADFD;
                }
                break;
        } /* US_SET_DETECTION */

        case US_GET_VERSION: {
                rc = usf_get_version(arg);
                break;
        } /* US_GET_VERSION */

        case US_SET_TX_STREAM_PARAM: {
                rc = usf_set_stream_param(&usf->usf_tx, arg, OUT);
                break;
        } /* US_SET_TX_STREAM_PARAM */

        case US_GET_TX_STREAM_PARAM: {
                rc = usf_get_stream_param(&usf->usf_tx, arg, OUT);
                break;
        } /* US_GET_TX_STREAM_PARAM */

        case US_SET_RX_STREAM_PARAM: {
                rc = usf_set_stream_param(&usf->usf_rx, arg, IN);
                break;
        } /* US_SET_RX_STREAM_PARAM */

        case US_GET_RX_STREAM_PARAM: {
                rc = usf_get_stream_param(&usf->usf_rx, arg, IN);
                break;
        } /* US_GET_RX_STREAM_PARAM */

        default:
                pr_err("%s: unsupported IOCTL command [%d]\n",
                       __func__,
                       cmd);
                rc = -ENOTTY;
                break;
        }

        if (rc &&
            ((cmd == US_SET_TX_INFO) ||
             (cmd == US_SET_RX_INFO)))
                release_xx(usf_xx);

        return rc;
} /* __usf_ioctl */
static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct usf_type *usf = file->private_data;
	int rc = 0;

	mutex_lock(&usf->mutex);
	rc = __usf_ioctl(usf, cmd, arg);
	mutex_unlock(&usf->mutex);

	return rc;
} /* usf_ioctl */

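/*
 * 32-bit userspace (compat) support: the ioctl payload structures contain
 * pointers, so 32-bit variants using compat_uptr_t are defined below and
 * converted to the native structures before reusing the common __usf_*
 * helpers.
 */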
#ifdef CONFIG_COMPAT

#define US_SET_TX_INFO32 _IOW(USF_IOCTL_MAGIC, 0, \
				struct us_tx_info_type32)
#define US_GET_TX_UPDATE32 _IOWR(USF_IOCTL_MAGIC, 2, \
				struct us_tx_update_info_type32)
#define US_SET_RX_INFO32 _IOW(USF_IOCTL_MAGIC, 3, \
				struct us_rx_info_type32)
#define US_SET_RX_UPDATE32 _IOWR(USF_IOCTL_MAGIC, 4, \
				struct us_rx_update_info_type32)
#define US_SET_DETECTION32 _IOWR(USF_IOCTL_MAGIC, 8, \
				struct us_detect_info_type32)
#define US_GET_VERSION32 _IOWR(USF_IOCTL_MAGIC, 9, \
				struct us_version_info_type32)
#define US_SET_TX_STREAM_PARAM32 _IOW(USF_IOCTL_MAGIC, 10, \
				struct us_stream_param_type32)
#define US_GET_TX_STREAM_PARAM32 _IOWR(USF_IOCTL_MAGIC, 11, \
				struct us_stream_param_type32)
#define US_SET_RX_STREAM_PARAM32 _IOW(USF_IOCTL_MAGIC, 12, \
				struct us_stream_param_type32)
#define US_GET_RX_STREAM_PARAM32 _IOWR(USF_IOCTL_MAGIC, 13, \
				struct us_stream_param_type32)

/* Info structure common for TX and RX */
struct us_xx_info_type32 {
	/* Input: general info */
	/* Name of the client - event calculator, ptr to char */
	const compat_uptr_t client_name;
	/* Selected device identification, accepted in the kernel's CAD */
	uint32_t dev_id;
	/* 0 - point_epos type; (e.g. 1 - gr_mmrd) */
	uint32_t stream_format;
	/* Required sample rate in Hz */
	uint32_t sample_rate;
	/* Size of a buffer (bytes) for US data transfer between the module and USF */
	uint32_t buf_size;
	/* Number of the buffers for the US data transfer */
	uint16_t buf_num;
	/* Number of the microphones (TX) or speakers (RX) */
	uint16_t port_cnt;
	/* Microphones (TX) or speakers (RX) indexes in their enumeration */
	uint8_t port_id[USF_MAX_PORT_NUM];
	/* Bits per sample: 16 or 32 */
	uint16_t bits_per_sample;
	/* Input: Transparent info for encoder in the LPASS */
	/* Parameters data size in bytes */
	uint16_t params_data_size;
	/* Pointer to the parameters, ptr to uint8_t */
	compat_uptr_t params_data;
	/* Max size of buffer for get and set parameter */
	uint32_t max_get_set_param_buf_size;
};

struct us_tx_info_type32 {
	/* Common info. This struct includes a ptr and therefore has a 32 version */
	struct us_xx_info_type32 us_xx_info;
	/* Info specific for TX. This struct doesn't include long or ptr
	 * and therefore needs no 32 version
	 */
	struct us_input_info_type input_info;
};

struct us_tx_update_info_type32 {
	/* Input general: */
	/* Number of calculated events */
	uint16_t event_counter;
	/* Calculated events or NULL, ptr to struct usf_event_type */
	compat_uptr_t event;
	/* Pointer (read index) to the end of available region */
	/* in the shared US data memory */
	uint32_t free_region;
	/* Time (sec) to wait for data or special values: */
	/* USF_NO_WAIT_TIMEOUT, USF_INFINITIVE_TIMEOUT, USF_DEFAULT_TIMEOUT */
	uint32_t timeout;
	/* Events (from conflicting devs) to be disabled/enabled */
	uint16_t event_filters;

	/* Input transparent data: */
	/* Parameters size */
	uint16_t params_data_size;
	/* Pointer to the parameters, ptr to uint8_t */
	compat_uptr_t params_data;

	/* Output parameters: */
	/* Pointer (write index) to the end of ready US data region */
	/* in the shared memory */
	uint32_t ready_region;
};

struct us_rx_info_type32 {
	/* Common info */
	struct us_xx_info_type32 us_xx_info;
	/* Info specific for RX */
};

struct us_rx_update_info_type32 {
	/* Input general: */
	/* Pointer (write index) to the end of ready US data region */
	/* in the shared memory */
	uint32_t ready_region;

	/* Input transparent data: */
	/* Parameters size */
	uint16_t params_data_size;
	/* Pointer to the parameters, ptr to uint8_t */
	compat_uptr_t params_data;

	/* Output parameters: */
	/* Pointer (read index) to the end of available region */
	/* in the shared US data memory */
	uint32_t free_region;
};

struct us_detect_info_type32 {
	/* US detection place (HW|FW) */
	/* NA in the Active and OFF states */
	enum us_detect_place_enum us_detector;
	/* US detection mode */
	enum us_detect_mode_enum us_detect_mode;
	/* US data dropped during this time (msec) */
	uint32_t skip_time;
	/* Transparent data size */
	uint16_t params_data_size;
	/* Pointer to the transparent data, ptr to uint8_t */
	compat_uptr_t params_data;
	/* Time (sec) to wait for US presence event */
	uint32_t detect_timeout;
	/* Out parameter: US presence */
	bool is_us;
};

struct us_version_info_type32 {
	/* Size of memory for the version string */
	uint16_t buf_size;
	/* Pointer to the memory for the version string, ptr to char */
	compat_uptr_t pbuf;
};

struct us_stream_param_type32 {
	/* Id of module */
	uint32_t module_id;
	/* Id of parameter */
	uint32_t param_id;
	/* Size of memory of the parameter buffer */
	uint32_t buf_size;
	/* Pointer to the memory of the parameter buffer */
	compat_uptr_t pbuf;
};

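/*
 * Copy the common TX/RX info from its 32-bit layout into the native
 * structure, converting user-space pointers with compat_ptr().
 */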
static void usf_compat_xx_info_type(struct us_xx_info_type32 *us_xx_info32,
				    struct us_xx_info_type *us_xx_info)
{
	int i = 0;

	us_xx_info->client_name = compat_ptr(us_xx_info32->client_name);
	us_xx_info->dev_id = us_xx_info32->dev_id;
	us_xx_info->stream_format = us_xx_info32->stream_format;
	us_xx_info->sample_rate = us_xx_info32->sample_rate;
	us_xx_info->buf_size = us_xx_info32->buf_size;
	us_xx_info->buf_num = us_xx_info32->buf_num;
	us_xx_info->port_cnt = us_xx_info32->port_cnt;
	for (i = 0; i < USF_MAX_PORT_NUM; i++)
		us_xx_info->port_id[i] = us_xx_info32->port_id[i];
	us_xx_info->bits_per_sample = us_xx_info32->bits_per_sample;
	us_xx_info->params_data_size = us_xx_info32->params_data_size;
	us_xx_info->params_data = compat_ptr(us_xx_info32->params_data);
	us_xx_info->max_get_set_param_buf_size =
		us_xx_info32->max_get_set_param_buf_size;
}

static int usf_set_tx_info32(struct usf_type *usf, unsigned long arg)
{
	struct us_tx_info_type32 config_tx32;
	struct us_tx_info_type config_tx;

	int rc = copy_from_user(&config_tx32,
				(struct us_tx_info_type32 __user *) arg,
				sizeof(config_tx32));
	if (rc) {
		pr_err("%s: copy config_tx from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&config_tx, 0, sizeof(config_tx));
	usf_compat_xx_info_type(&(config_tx32.us_xx_info),
				&(config_tx.us_xx_info));
	config_tx.input_info = config_tx32.input_info;

	return __usf_set_tx_info(usf, &config_tx);
} /* usf_set_tx_info32 */

static int usf_set_rx_info32(struct usf_type *usf, unsigned long arg)
{
	struct us_rx_info_type32 config_rx32;
	struct us_rx_info_type config_rx;

	int rc = copy_from_user(&config_rx32,
				(struct us_rx_info_type32 __user *) arg,
				sizeof(config_rx32));
	if (rc) {
		pr_err("%s: copy config_rx from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&config_rx, 0, sizeof(config_rx));
	usf_compat_xx_info_type(&(config_rx32.us_xx_info),
				&(config_rx.us_xx_info));

	return __usf_set_rx_info(usf, &config_rx);
} /* usf_set_rx_info32 */

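/*
 * Compat wrappers for the update/detection/version/stream-param commands:
 * copy the 32-bit structure in, convert pointers with compat_ptr() and call
 * the common __usf_* helper; where there are output fields, only those are
 * copied back to user space.
 */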
static int usf_get_tx_update32(struct usf_type *usf, unsigned long arg)
{
	struct us_tx_update_info_type32 upd_tx_info32;
	struct us_tx_update_info_type upd_tx_info;

	int rc = copy_from_user(&upd_tx_info32,
				(struct us_tx_update_info_type32 __user *) arg,
				sizeof(upd_tx_info32));
	if (rc) {
		pr_err("%s: copy upd_tx_info32 from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&upd_tx_info, 0, sizeof(upd_tx_info));
	upd_tx_info.event_counter = upd_tx_info32.event_counter;
	upd_tx_info.event = compat_ptr(upd_tx_info32.event);
	upd_tx_info.free_region = upd_tx_info32.free_region;
	upd_tx_info.timeout = upd_tx_info32.timeout;
	upd_tx_info.event_filters = upd_tx_info32.event_filters;
	upd_tx_info.params_data_size = upd_tx_info32.params_data_size;
	upd_tx_info.params_data = compat_ptr(upd_tx_info32.params_data);
	upd_tx_info.ready_region = upd_tx_info32.ready_region;

	rc = __usf_get_tx_update(usf, &upd_tx_info);
	if (rc < 0) {
		pr_err("%s: get tx update failed; rc=%d\n",
		       __func__, rc);
		return rc;
	}

	/* Update only the fields that were changed */
	upd_tx_info32.ready_region = upd_tx_info.ready_region;

	rc = copy_to_user((void __user *)arg, &upd_tx_info32,
			  sizeof(upd_tx_info32));
	if (rc) {
		pr_err("%s: copy upd_tx_info32 to user; rc=%d\n",
		       __func__, rc);
		rc = -EFAULT;
	}

	return rc;
} /* usf_get_tx_update32 */

static int usf_set_rx_update32(struct usf_xx_type *usf_xx, unsigned long arg)
{
	struct us_rx_update_info_type32 upd_rx_info32;
	struct us_rx_update_info_type upd_rx_info;

	int rc = copy_from_user(&upd_rx_info32,
				(struct us_rx_update_info_type32 __user *) arg,
				sizeof(upd_rx_info32));
	if (rc) {
		pr_err("%s: copy upd_rx_info32 from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&upd_rx_info, 0, sizeof(upd_rx_info));
	upd_rx_info.ready_region = upd_rx_info32.ready_region;
	upd_rx_info.params_data_size = upd_rx_info32.params_data_size;
	upd_rx_info.params_data = compat_ptr(upd_rx_info32.params_data);
	upd_rx_info.free_region = upd_rx_info32.free_region;

	rc = __usf_set_rx_update(usf_xx, &upd_rx_info);
	if (rc < 0) {
		pr_err("%s: set rx update failed; rc=%d\n",
		       __func__, rc);
		return rc;
	}

	/* Update only the fields that were changed */
	upd_rx_info32.free_region = upd_rx_info.free_region;

	rc = copy_to_user((void __user *)arg,
			  &upd_rx_info32,
			  sizeof(upd_rx_info32));
	if (rc) {
		pr_err("%s: copy rx_info32 to user; rc=%d\n",
		       __func__, rc);
		rc = -EFAULT;
	}

	return rc;
} /* usf_set_rx_update32 */

static int usf_set_us_detection32(struct usf_type *usf, unsigned long arg)
{
	struct us_detect_info_type32 detect_info32;
	struct us_detect_info_type detect_info;

	int rc = copy_from_user(&detect_info32,
				(struct us_detect_info_type32 __user *) arg,
				sizeof(detect_info32));
	if (rc) {
		pr_err("%s: copy detect_info32 from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	if (detect_info32.params_data_size > USF_MAX_USER_BUF_SIZE) {
		pr_err("%s: user buffer size exceeds maximum\n",
		       __func__);
		return -EFAULT;
	}

	memset(&detect_info, 0, sizeof(detect_info));
	detect_info.us_detector = detect_info32.us_detector;
	detect_info.us_detect_mode = detect_info32.us_detect_mode;
	detect_info.skip_time = detect_info32.skip_time;
	detect_info.params_data_size = detect_info32.params_data_size;
	detect_info.params_data = compat_ptr(detect_info32.params_data);
	detect_info.detect_timeout = detect_info32.detect_timeout;
	detect_info.is_us = detect_info32.is_us;

	rc = __usf_set_us_detection(usf, &detect_info);
	if (rc < 0) {
		pr_err("%s: set us detection failed; rc=%d\n",
		       __func__, rc);
		return rc;
	}

	/* Update only the fields that were changed */
	detect_info32.is_us = detect_info.is_us;

	rc = copy_to_user((void __user *)arg,
			  &detect_info32,
			  sizeof(detect_info32));
	if (rc) {
		pr_err("%s: copy detect_info32 to user; rc=%d\n",
		       __func__, rc);
		rc = -EFAULT;
	}

	return rc;
} /* usf_set_us_detection32 */

static int usf_get_version32(unsigned long arg)
{
	struct us_version_info_type32 version_info32;
	struct us_version_info_type version_info;

	int rc = copy_from_user(&version_info32,
				(struct us_version_info_type32 __user *) arg,
				sizeof(version_info32));
	if (rc) {
		pr_err("%s: copy version_info32 from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&version_info, 0, sizeof(version_info));
	version_info.buf_size = version_info32.buf_size;
	version_info.pbuf = compat_ptr(version_info32.pbuf);

	rc = __usf_get_version(&version_info);
	if (rc < 0) {
		pr_err("%s: get version failed; rc=%d\n",
		       __func__, rc);
		return rc;
	}

	/* None of the fields were changed */
	rc = copy_to_user((void __user *)arg,
			  &version_info32,
			  sizeof(version_info32));
	if (rc) {
		pr_err("%s: copy version_info32 to user; rc=%d\n",
		       __func__, rc);
		rc = -EFAULT;
	}

	return rc;
} /* usf_get_version32 */

static int usf_set_stream_param32(struct usf_xx_type *usf_xx,
				  unsigned long arg, int dir)
{
	struct us_stream_param_type32 set_stream_param32;
	struct us_stream_param_type set_stream_param;
	int rc = 0;

	rc = copy_from_user(&set_stream_param32,
			    (struct us_stream_param_type32 __user *) arg,
			    sizeof(set_stream_param32));
	if (rc) {
		pr_err("%s: copy set_stream_param from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&set_stream_param, 0, sizeof(set_stream_param));
	set_stream_param.module_id = set_stream_param32.module_id;
	set_stream_param.param_id = set_stream_param32.param_id;
	set_stream_param.buf_size = set_stream_param32.buf_size;
	set_stream_param.pbuf = compat_ptr(set_stream_param32.pbuf);

	return __usf_set_stream_param(usf_xx, &set_stream_param, dir);
} /* usf_set_stream_param32 */

static int usf_get_stream_param32(struct usf_xx_type *usf_xx,
				  unsigned long arg, int dir)
{
	struct us_stream_param_type32 get_stream_param32;
	struct us_stream_param_type get_stream_param;
	int rc = 0;

	rc = copy_from_user(&get_stream_param32,
			    (struct us_stream_param_type32 __user *) arg,
			    sizeof(get_stream_param32));
	if (rc) {
		pr_err("%s: copy get_stream_param from user; rc=%d\n",
		       __func__, rc);
		return -EFAULT;
	}

	memset(&get_stream_param, 0, sizeof(get_stream_param));
	get_stream_param.module_id = get_stream_param32.module_id;
	get_stream_param.param_id = get_stream_param32.param_id;
	get_stream_param.buf_size = get_stream_param32.buf_size;
	get_stream_param.pbuf = compat_ptr(get_stream_param32.pbuf);

	return __usf_get_stream_param(usf_xx, &get_stream_param, dir);
} /* usf_get_stream_param32 */

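/*
 * Compat ioctl dispatcher; commands whose payloads carry no pointers
 * (start/stop) are forwarded unchanged to __usf_ioctl().
 */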
static long __usf_compat_ioctl(struct usf_type *usf,
			       unsigned int cmd,
			       unsigned long arg)
{
	int rc = 0;
	struct usf_xx_type *usf_xx = NULL;

	switch (cmd) {
	case US_START_TX:
	case US_START_RX:
	case US_STOP_TX:
	case US_STOP_RX: {
		return __usf_ioctl(usf, cmd, arg);
	}

	case US_SET_TX_INFO32: {
		usf_xx = &usf->usf_tx;
		if (usf_xx->usf_state == USF_OPENED_STATE)
			rc = usf_set_tx_info32(usf, arg);
		else {
			pr_err("%s: set_tx_info32: wrong state[%d]\n",
			       __func__,
			       usf_xx->usf_state);
			return -EBADFD;
		}
		break;
	} /* US_SET_TX_INFO32 */

	case US_SET_RX_INFO32: {
		usf_xx = &usf->usf_rx;
		if (usf_xx->usf_state == USF_OPENED_STATE)
			rc = usf_set_rx_info32(usf, arg);
		else {
			pr_err("%s: set_rx_info32: wrong state[%d]\n",
			       __func__,
			       usf_xx->usf_state);
			return -EBADFD;
		}
		break;
	} /* US_SET_RX_INFO32 */

	case US_GET_TX_UPDATE32: {
		struct usf_xx_type *usf_xx = &usf->usf_tx;

		if (usf_xx->usf_state == USF_WORK_STATE)
			rc = usf_get_tx_update32(usf, arg);
		else {
			pr_err("%s: get_tx_update32: wrong state[%d]\n",
			       __func__,
			       usf_xx->usf_state);
			rc = -EBADFD;
		}
		break;
	} /* US_GET_TX_UPDATE32 */

	case US_SET_RX_UPDATE32: {
		struct usf_xx_type *usf_xx = &usf->usf_rx;

		if (usf_xx->usf_state == USF_WORK_STATE)
			rc = usf_set_rx_update32(usf_xx, arg);
		else {
			pr_err("%s: set_rx_update: wrong state[%d]\n",
			       __func__,
			       usf_xx->usf_state);
			rc = -EBADFD;
		}
		break;
	} /* US_SET_RX_UPDATE32 */

	case US_SET_DETECTION32: {
		struct usf_xx_type *usf_xx = &usf->usf_tx;

		if (usf_xx->usf_state == USF_WORK_STATE)
			rc = usf_set_us_detection32(usf, arg);
		else {
			pr_err("%s: set us detection: wrong state[%d]\n",
			       __func__,
			       usf_xx->usf_state);
			rc = -EBADFD;
		}
		break;
	} /* US_SET_DETECTION32 */

	case US_GET_VERSION32: {
		rc = usf_get_version32(arg);
		break;
	} /* US_GET_VERSION32 */

	case US_SET_TX_STREAM_PARAM32: {
		rc = usf_set_stream_param32(&usf->usf_tx, arg, OUT);
		break;
	} /* US_SET_TX_STREAM_PARAM32 */

	case US_GET_TX_STREAM_PARAM32: {
		rc = usf_get_stream_param32(&usf->usf_tx, arg, OUT);
		break;
	} /* US_GET_TX_STREAM_PARAM32 */

	case US_SET_RX_STREAM_PARAM32: {
		rc = usf_set_stream_param32(&usf->usf_rx, arg, IN);
		break;
	} /* US_SET_RX_STREAM_PARAM32 */

	case US_GET_RX_STREAM_PARAM32: {
		rc = usf_get_stream_param32(&usf->usf_rx, arg, IN);
		break;
	} /* US_GET_RX_STREAM_PARAM32 */

	default:
		pr_err("%s: unsupported IOCTL command [%d]\n",
		       __func__,
		       cmd);
		rc = -ENOTTY;
		break;
	}

	/* In this path the configuration arrives via the 32-bit info commands */
	if (rc &&
	    ((cmd == US_SET_TX_INFO32) ||
	     (cmd == US_SET_RX_INFO32)))
		release_xx(usf_xx);

	return rc;
} /* __usf_compat_ioctl */

static long usf_compat_ioctl(struct file *file,
			     unsigned int cmd,
			     unsigned long arg)
{
	struct usf_type *usf = file->private_data;
	int rc = 0;

	mutex_lock(&usf->mutex);
	rc = __usf_compat_ioctl(usf, cmd, arg);
	mutex_unlock(&usf->mutex);

	return rc;
} /* usf_compat_ioctl */
#endif /* CONFIG_COMPAT */

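/*
 * Map the shared US data buffers into user space via
 * q6usm_get_virtual_address(); a writable mapping selects the RX port,
 * otherwise the TX port is used.
 */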
static int usf_mmap(struct file *file, struct vm_area_struct *vms)
{
	struct usf_type *usf = file->private_data;
	int dir = OUT;
	struct usf_xx_type *usf_xx = &usf->usf_tx;
	int rc = 0;

	mutex_lock(&usf->mutex);
	if (vms->vm_flags & USF_VM_WRITE) { /* RX buf mapping */
		dir = IN;
		usf_xx = &usf->usf_rx;
	}
	rc = q6usm_get_virtual_address(dir, usf_xx->usc, vms);
	mutex_unlock(&usf->mutex);

	return rc;
}

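/*
 * Try to claim a slot in s_opened_devs for this minor number; returns the
 * slot index, or USF_UNDEF_DEV_ID if the device is already opened or no
 * slot is free.
 */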
static uint16_t add_opened_dev(int minor)
{
	uint16_t ind = 0;

	for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) {
		if (minor == atomic_cmpxchg(&s_opened_devs[ind], 0, minor)) {
			pr_err("%s: device %d is already opened\n",
			       __func__, minor);
			return USF_UNDEF_DEV_ID;
		} else {
			pr_debug("%s: device %d is added; ind=%d\n",
				 __func__, minor, ind);
			return ind;
		}
	}

	pr_err("%s: there is no place for device %d\n",
	       __func__, minor);
	return USF_UNDEF_DEV_ID;
}

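/*
 * Allocate the per-open usf context, put both directions into the OPENED
 * state and initialize the per-context mutex.
 */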
static int usf_open(struct inode *inode, struct file *file)
{
	struct usf_type *usf = NULL;
	uint16_t dev_ind = 0;
	int minor = MINOR(inode->i_rdev);

	dev_ind = add_opened_dev(minor);
	if (dev_ind == USF_UNDEF_DEV_ID)
		return -EBUSY;

	usf = kzalloc(sizeof(struct usf_type), GFP_KERNEL);
	if (usf == NULL)
		return -ENOMEM;

	wakeup_source_init(&usf_wakeup_source, "usf");

	file->private_data = usf;
	usf->dev_ind = dev_ind;

	usf->usf_tx.usf_state = USF_OPENED_STATE;
	usf->usf_rx.usf_state = USF_OPENED_STATE;

	usf->usf_tx.us_detect_type = USF_US_DETECT_UNDEF;
	usf->usf_rx.us_detect_type = USF_US_DETECT_UNDEF;

	mutex_init(&usf->mutex);

	pr_debug("%s:usf in open\n", __func__);
	return 0;
}

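/*
 * Tear down on close: release input devices, disable both directions,
 * free the opened-devs slot and the per-open context.
 */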
static int usf_release(struct inode *inode, struct file *file)
{
	struct usf_type *usf = file->private_data;

	pr_debug("%s: release entry\n", __func__);

	mutex_lock(&usf->mutex);
	usf_release_input(usf);

	usf_disable(&usf->usf_tx);
	usf_disable(&usf->usf_rx);

	atomic_set(&s_opened_devs[usf->dev_ind], 0);

	wakeup_source_trash(&usf_wakeup_source);
	mutex_unlock(&usf->mutex);
	mutex_destroy(&usf->mutex);

	kfree(usf);
	pr_debug("%s: release exit\n", __func__);
	return 0;
}

static const struct file_operations usf_fops = {
	.owner = THIS_MODULE,
	.open = usf_open,
	.release = usf_release,
	.unlocked_ioctl = usf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = usf_compat_ioctl,
#endif /* CONFIG_COMPAT */
	.mmap = usf_mmap,
};

static struct miscdevice usf_misc[MAX_DEVS_NUMBER] = {
	{
		.minor = MISC_DYNAMIC_MINOR,
		.name = "usf1",
		.fops = &usf_fops,
	},
};

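/* Register the misc device(s) at module load; deregister them on exit */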
static int __init usf_init(void)
{
	int rc = 0;
	uint16_t ind = 0;

	pr_debug("%s: USF SW version %s.\n", __func__, DRV_VERSION);
	pr_debug("%s: Max %d devs registration\n", __func__, MAX_DEVS_NUMBER);

	for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) {
		rc = misc_register(&usf_misc[ind]);
		if (rc) {
			pr_err("%s: misc_register() failed ind=%d; rc = %d\n",
			       __func__, ind, rc);
			break;
		}
	}

	if (!rc)
		q6usm_init();

	return rc;
}
module_init(usf_init);

static void __exit usf_exit(void)
{
	uint16_t ind = 0;

	for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind)
		misc_deregister(&usf_misc[ind]);
}
module_exit(usf_exit);

MODULE_DESCRIPTION("Ultrasound framework driver");
MODULE_LICENSE("GPL v2");