hid-core.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID support for Linux
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <[email protected]>
 *  Copyright (c) 2005 Michael Haboustak <[email protected]> for Concept2, Inc
 *  Copyright (c) 2006-2012 Jiri Kosina
 */

/*
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/input.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>
#include <linux/uhid.h>

#include "hid-ids.h"

/*
 * Version Information
 */

#define DRIVER_DESC "HID core driver"

int hid_debug = 0;
module_param_named(debug, hid_debug, int, 0600);
MODULE_PARM_DESC(debug, "toggle HID debugging messages");
EXPORT_SYMBOL_GPL(hid_debug);

static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");

/*
 * Register a new report for a device.
 */

struct hid_report *hid_register_report(struct hid_device *device,
				       enum hid_report_type type, unsigned int id,
				       unsigned int application)
{
	struct hid_report_enum *report_enum = device->report_enum + type;
	struct hid_report *report;

	if (id >= HID_MAX_IDS)
		return NULL;
	if (report_enum->report_id_hash[id])
		return report_enum->report_id_hash[id];

	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
	if (!report)
		return NULL;

	if (id != 0)
		report_enum->numbered = 1;

	report->id = id;
	report->type = type;
	report->size = 0;
	report->device = device;
	report->application = application;
	report_enum->report_id_hash[id] = report;

	list_add_tail(&report->list, &report_enum->report_list);
	INIT_LIST_HEAD(&report->field_entry_list);

	return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);

/*
 * Register a new field for this report.
 */

static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
	struct hid_field *field;

	if (report->maxfield == HID_MAX_FIELDS) {
		hid_err(report->device, "too many fields in report\n");
		return NULL;
	}

	field = kzalloc((sizeof(struct hid_field) +
			 usages * sizeof(struct hid_usage) +
			 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
	if (!field)
		return NULL;

	field->index = report->maxfield++;
	report->field[field->index] = field;
	field->usage = (struct hid_usage *)(field + 1);
	field->value = (s32 *)(field->usage + usages);
	field->new_value = (s32 *)(field->value + usages);
	field->usages_priorities = (s32 *)(field->new_value + usages);
	field->report = report;

	return field;
}
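
/*
 * Note on the allocation above: the usage array, the current values, the
 * new values and the per-usage priorities all live in the single kzalloc'd
 * block, laid out directly behind struct hid_field:
 *
 *   [struct hid_field][usage[0..usages-1]][value[]][new_value[]][usages_priorities[]]
 *
 * hid_free_report() therefore only needs to kfree(field) to release all of it.
 */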

/*
 * Open a collection. The type/usage is pushed on the stack.
 */

static int open_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection;
	unsigned usage;
	int collection_index;

	usage = parser->local.usage[0];

	if (parser->collection_stack_ptr == parser->collection_stack_size) {
		unsigned int *collection_stack;
		unsigned int new_size = parser->collection_stack_size +
					HID_COLLECTION_STACK_SIZE;

		collection_stack = krealloc(parser->collection_stack,
					    new_size * sizeof(unsigned int),
					    GFP_KERNEL);
		if (!collection_stack)
			return -ENOMEM;

		parser->collection_stack = collection_stack;
		parser->collection_stack_size = new_size;
	}

	if (parser->device->maxcollection == parser->device->collection_size) {
		collection = kmalloc(
				array3_size(sizeof(struct hid_collection),
					    parser->device->collection_size,
					    2),
				GFP_KERNEL);
		if (collection == NULL) {
			hid_err(parser->device, "failed to reallocate collection array\n");
			return -ENOMEM;
		}
		memcpy(collection, parser->device->collection,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		memset(collection + parser->device->collection_size, 0,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		kfree(parser->device->collection);
		parser->device->collection = collection;
		parser->device->collection_size *= 2;
	}

	parser->collection_stack[parser->collection_stack_ptr++] =
		parser->device->maxcollection;

	collection_index = parser->device->maxcollection++;
	collection = parser->device->collection + collection_index;
	collection->type = type;
	collection->usage = usage;
	collection->level = parser->collection_stack_ptr - 1;
	collection->parent_idx = (collection->level == 0) ? -1 :
		parser->collection_stack[collection->level - 1];

	if (type == HID_COLLECTION_APPLICATION)
		parser->device->maxapplication++;

	return 0;
}

/*
 * Close a collection.
 */

static int close_collection(struct hid_parser *parser)
{
	if (!parser->collection_stack_ptr) {
		hid_err(parser->device, "collection stack underflow\n");
		return -EINVAL;
	}
	parser->collection_stack_ptr--;
	return 0;
}

/*
 * Climb up the stack, search for the specified collection type
 * and return the usage.
 */

static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection = parser->device->collection;
	int n;

	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
		unsigned index = parser->collection_stack[n];

		if (collection[index].type == type)
			return collection[index].usage;
	}
	return 0; /* we know nothing about this usage type */
}

/*
 * Concatenate usage which defines 16 bits or less with the
 * currently defined usage page to form a 32 bit usage
 */

static void complete_usage(struct hid_parser *parser, unsigned int index)
{
	parser->local.usage[index] &= 0xFFFF;
	parser->local.usage[index] |=
		(parser->global.usage_page & 0xFFFF) << 16;
}
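
/*
 * Example: with a Generic Desktop Usage Page (0x01) in effect, a 1- or
 * 2-byte Usage item of 0x30 (X axis) is completed above to the 32-bit
 * usage 0x00010030, i.e. (usage_page << 16) | usage_id.
 */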

/*
 * Add a usage to the temporary parser table.
 */

static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
	if (parser->local.usage_index >= HID_MAX_USAGES) {
		hid_err(parser->device, "usage index exceeded\n");
		return -1;
	}
	parser->local.usage[parser->local.usage_index] = usage;

	/*
	 * If Usage item only includes usage id, concatenate it with
	 * currently defined usage page
	 */
	if (size <= 2)
		complete_usage(parser, parser->local.usage_index);

	parser->local.usage_size[parser->local.usage_index] = size;
	parser->local.collection_index[parser->local.usage_index] =
		parser->collection_stack_ptr ?
		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
	parser->local.usage_index++;
	return 0;
}

/*
 * Register a new field for this report.
 */

static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
	struct hid_report *report;
	struct hid_field *field;
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	unsigned int usages;
	unsigned int offset;
	unsigned int i;
	unsigned int application;

	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);

	report = hid_register_report(parser->device, report_type,
				     parser->global.report_id, application);
	if (!report) {
		hid_err(parser->device, "hid_register_report failed\n");
		return -1;
	}

	/* Handle both signed and unsigned cases properly */
	if ((parser->global.logical_minimum < 0 &&
	     parser->global.logical_maximum <
	     parser->global.logical_minimum) ||
	    (parser->global.logical_minimum >= 0 &&
	     (__u32)parser->global.logical_maximum <
	     (__u32)parser->global.logical_minimum)) {
		dbg_hid("logical range invalid 0x%x 0x%x\n",
			parser->global.logical_minimum,
			parser->global.logical_maximum);
		return -1;
	}

	offset = report->size;
	report->size += parser->global.report_size * parser->global.report_count;

	if (IS_ENABLED(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
		max_buffer_size = UHID_DATA_MAX;

	/* Total size check: Allow for possible report index byte */
	if (report->size > (max_buffer_size - 1) << 3) {
		hid_err(parser->device, "report is too long\n");
		return -1;
	}

	if (!parser->local.usage_index) /* Ignore padding fields */
		return 0;

	usages = max_t(unsigned, parser->local.usage_index,
		       parser->global.report_count);

	field = hid_register_field(report, usages);
	if (!field)
		return 0;

	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
	field->application = application;

	for (i = 0; i < usages; i++) {
		unsigned j = i;

		/* Duplicate the last usage we parsed if we have excess values */
		if (i >= parser->local.usage_index)
			j = parser->local.usage_index - 1;

		field->usage[i].hid = parser->local.usage[j];
		field->usage[i].collection_index =
			parser->local.collection_index[j];
		field->usage[i].usage_index = i;
		field->usage[i].resolution_multiplier = 1;
	}

	field->maxusage = usages;
	field->flags = flags;
	field->report_offset = offset;
	field->report_type = report_type;
	field->report_size = parser->global.report_size;
	field->report_count = parser->global.report_count;
	field->logical_minimum = parser->global.logical_minimum;
	field->logical_maximum = parser->global.logical_maximum;
	field->physical_minimum = parser->global.physical_minimum;
	field->physical_maximum = parser->global.physical_maximum;
	field->unit_exponent = parser->global.unit_exponent;
	field->unit = parser->global.unit;

	return 0;
}
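
/*
 * Note: report->size, field->report_offset and field->report_size above are
 * all expressed in bits, which is why the total-size check compares against
 * (max_buffer_size - 1) << 3 (the byte budget minus a possible report ID
 * byte, converted to bits).
 */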

/*
 * Read data value from item.
 */

static u32 item_udata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.u8;
	case 2: return item->data.u16;
	case 4: return item->data.u32;
	}
	return 0;
}

static s32 item_sdata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.s8;
	case 2: return item->data.s16;
	case 4: return item->data.s32;
	}
	return 0;
}

/*
 * Process a global item.
 */

static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
	__s32 raw_value;

	switch (item->tag) {
	case HID_GLOBAL_ITEM_TAG_PUSH:
		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
			hid_err(parser->device, "global environment stack overflow\n");
			return -1;
		}
		memcpy(parser->global_stack + parser->global_stack_ptr++,
		       &parser->global, sizeof(struct hid_global));
		return 0;
	case HID_GLOBAL_ITEM_TAG_POP:
		if (!parser->global_stack_ptr) {
			hid_err(parser->device, "global environment stack underflow\n");
			return -1;
		}
		memcpy(&parser->global, parser->global_stack +
		       --parser->global_stack_ptr, sizeof(struct hid_global));
		return 0;
	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
		parser->global.usage_page = item_udata(item);
		return 0;
	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
		parser->global.logical_minimum = item_sdata(item);
		return 0;
	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
		if (parser->global.logical_minimum < 0)
			parser->global.logical_maximum = item_sdata(item);
		else
			parser->global.logical_maximum = item_udata(item);
		return 0;
	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
		parser->global.physical_minimum = item_sdata(item);
		return 0;
	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
		if (parser->global.physical_minimum < 0)
			parser->global.physical_maximum = item_sdata(item);
		else
			parser->global.physical_maximum = item_udata(item);
		return 0;
	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
		/* Many devices provide unit exponent as a two's complement
		 * nibble due to the common misunderstanding of HID
		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
		 * both this and the standard encoding. */
		raw_value = item_sdata(item);
		if (!(raw_value & 0xfffffff0))
			parser->global.unit_exponent = hid_snto32(raw_value, 4);
		else
			parser->global.unit_exponent = raw_value;
		return 0;
	case HID_GLOBAL_ITEM_TAG_UNIT:
		parser->global.unit = item_udata(item);
		return 0;
	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
		parser->global.report_size = item_udata(item);
		if (parser->global.report_size > 256) {
			hid_err(parser->device, "invalid report_size %d\n",
				parser->global.report_size);
			return -1;
		}
		return 0;
	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
		parser->global.report_count = item_udata(item);
		if (parser->global.report_count > HID_MAX_USAGES) {
			hid_err(parser->device, "invalid report_count %d\n",
				parser->global.report_count);
			return -1;
		}
		return 0;
	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
		parser->global.report_id = item_udata(item);
		if (parser->global.report_id == 0 ||
		    parser->global.report_id >= HID_MAX_IDS) {
			hid_err(parser->device, "report_id %u is invalid\n",
				parser->global.report_id);
			return -1;
		}
		return 0;
	default:
		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
		return -1;
	}
}
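
/*
 * Unit Exponent handling above, by example: if only the low nibble of the
 * item value is set (e.g. 0x0C), it is treated as a two's complement nibble
 * and hid_snto32(0x0C, 4) yields -4; otherwise the already sign-extended
 * item value (e.g. 0xFFFFFFFC == -4) is used as-is.
 */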

/*
 * Process a local item.
 */

static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	unsigned n;
	__u32 count;

	data = item_udata(item);

	switch (item->tag) {
	case HID_LOCAL_ITEM_TAG_DELIMITER:
		if (data) {
			/*
			 * We treat items before the first delimiter
			 * as global to all usage sets (branch 0).
			 * For the moment we process only these global
			 * items and the first delimiter set.
			 */
			if (parser->local.delimiter_depth != 0) {
				hid_err(parser->device, "nested delimiters\n");
				return -1;
			}
			parser->local.delimiter_depth++;
			parser->local.delimiter_branch++;
		} else {
			if (parser->local.delimiter_depth < 1) {
				hid_err(parser->device, "bogus close delimiter\n");
				return -1;
			}
			parser->local.delimiter_depth--;
		}
		return 0;
	case HID_LOCAL_ITEM_TAG_USAGE:
		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}
		return hid_add_usage(parser, data, item->size);
	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}
		parser->local.usage_minimum = data;
		return 0;
	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}
		count = data - parser->local.usage_minimum;
		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
			/*
			 * We do not warn if the name is not set, we are
			 * actually pre-scanning the device.
			 */
			if (dev_name(&parser->device->dev))
				hid_warn(parser->device,
					 "ignoring exceeding usage max\n");
			data = HID_MAX_USAGES - parser->local.usage_index +
				parser->local.usage_minimum - 1;
			if (data <= 0) {
				hid_err(parser->device,
					"no more usage index available\n");
				return -1;
			}
		}
		for (n = parser->local.usage_minimum; n <= data; n++)
			if (hid_add_usage(parser, n, item->size)) {
				dbg_hid("hid_add_usage failed\n");
				return -1;
			}
		return 0;
	default:
		dbg_hid("unknown local item tag 0x%x\n", item->tag);
		return 0;
	}
	return 0;
}

/*
 * Concatenate Usage Pages into Usages where relevant:
 * As per specification, 6.2.2.8: "When the parser encounters a main item it
 * concatenates the last declared Usage Page with a Usage to form a complete
 * usage value."
 */

static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{
	int i;
	unsigned int usage_page;
	unsigned int current_page;

	if (!parser->local.usage_index)
		return;

	usage_page = parser->global.usage_page;

	/*
	 * Concatenate usage page again only if last declared Usage Page
	 * has not been already used in previous usages concatenation
	 */
	for (i = parser->local.usage_index - 1; i >= 0; i--) {
		if (parser->local.usage_size[i] > 2)
			/* Ignore extended usages */
			continue;

		current_page = parser->local.usage[i] >> 16;
		if (current_page == usage_page)
			break;

		complete_usage(parser, i);
	}
}

/*
 * Process a main item.
 */

static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int ret;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		ret = open_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		ret = close_collection(parser);
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
		break;
	default:
		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
		ret = 0;
	}

	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */

	return ret;
}

/*
 * Process a reserved item.
 */

static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
{
	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
	return 0;
}

/*
 * Free a report and all registered fields. The field->usage and
 * field->value tables are allocated behind the field, so we need
 * only to free(field) itself.
 */

static void hid_free_report(struct hid_report *report)
{
	unsigned n;

	kfree(report->field_entries);

	for (n = 0; n < report->maxfield; n++)
		kfree(report->field[n]);
	kfree(report);
}

/*
 * Close report. This function returns the device
 * state to the point prior to hid_open_report().
 */

static void hid_close_report(struct hid_device *device)
{
	unsigned i, j;

	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = device->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];

			if (report)
				hid_free_report(report);
		}
		memset(report_enum, 0, sizeof(*report_enum));
		INIT_LIST_HEAD(&report_enum->report_list);
	}

	kfree(device->rdesc);
	device->rdesc = NULL;
	device->rsize = 0;

	kfree(device->collection);
	device->collection = NULL;
	device->collection_size = 0;
	device->maxcollection = 0;
	device->maxapplication = 0;

	device->status &= ~HID_STAT_PARSED;
}

/*
 * Free a device structure, all reports, and all fields.
 */

static void hid_device_release(struct device *dev)
{
	struct hid_device *hid = to_hid_device(dev);

	hid_close_report(hid);
	kfree(hid->dev_rdesc);
	kfree(hid);
}

/*
 * Fetch a report description item from the data stream. We support long
 * items, though they are not used yet.
 */

static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
{
	u8 b;

	if ((end - start) <= 0)
		return NULL;

	b = *start++;

	item->type = (b >> 2) & 3;
	item->tag  = (b >> 4) & 15;

	if (item->tag == HID_ITEM_TAG_LONG) {
		item->format = HID_ITEM_FORMAT_LONG;

		if ((end - start) < 2)
			return NULL;

		item->size = *start++;
		item->tag  = *start++;

		if ((end - start) < item->size)
			return NULL;

		item->data.longdata = start;
		start += item->size;
		return start;
	}

	item->format = HID_ITEM_FORMAT_SHORT;
	item->size = b & 3;

	switch (item->size) {
	case 0:
		return start;
	case 1:
		if ((end - start) < 1)
			return NULL;
		item->data.u8 = *start++;
		return start;
	case 2:
		if ((end - start) < 2)
			return NULL;
		item->data.u16 = get_unaligned_le16(start);
		start = (__u8 *)((__le16 *)start + 1);
		return start;
	case 3:
		item->size++;
		if ((end - start) < 4)
			return NULL;
		item->data.u32 = get_unaligned_le32(start);
		start = (__u8 *)((__le32 *)start + 1);
		return start;
	}

	return NULL;
}
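
/*
 * Short item encoding decoded above (HID 1.11, section 6.2.2.2): the prefix
 * byte packs bSize in bits 0-1 (0, 1, 2 or 4 data bytes, with the value 3
 * meaning 4), bType in bits 2-3 (main/global/local/reserved) and bTag in
 * bits 4-7. Long items use the reserved tag 0xF (HID_ITEM_TAG_LONG) and
 * carry an explicit size byte and tag byte instead.
 */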

static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
	struct hid_device *hid = parser->device;

	if (usage == HID_DG_CONTACTID)
		hid->group = HID_GROUP_MULTITOUCH;
}

static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
{
	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;

	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}

static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_device *hid = parser->device;
	int i;

	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
	    type == HID_COLLECTION_PHYSICAL)
		hid->group = HID_GROUP_SENSOR_HUB;

	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
	    hid->group == HID_GROUP_MULTITOUCH)
		hid->group = HID_GROUP_GENERIC;

	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] == HID_GD_POINTER)
				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;

	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;

	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] ==
			    (HID_UP_GOOGLEVENDOR | 0x0001))
				parser->device->group =
					HID_GROUP_VIVALDI;
}

static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int i;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		hid_scan_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		/* ignore constant inputs, they will be ignored by hid-input */
		if (data & HID_MAIN_ITEM_CONSTANT)
			break;
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_input_usage(parser, parser->local.usage[i]);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_feature_usage(parser, parser->local.usage[i]);
		break;
	}

	/* Reset the local parser environment */
	memset(&parser->local, 0, sizeof(parser->local));

	return 0;
}

/*
 * Scan a report descriptor before the device is added to the bus.
 * Sets device groups and other properties that determine what driver
 * to load.
 */

static int hid_scan_report(struct hid_device *hid)
{
	struct hid_parser *parser;
	struct hid_item item;
	__u8 *start = hid->dev_rdesc;
	__u8 *end = start + hid->dev_rsize;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_scan_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser)
		return -ENOMEM;

	parser->device = hid;
	hid->group = HID_GROUP_GENERIC;

	/*
	 * The parsing is simpler than the one in hid_open_report() as we should
	 * be robust against hid errors. Those errors will be raised by
	 * hid_open_report() anyway.
	 */
	while ((start = fetch_item(start, end, &item)) != NULL)
		dispatch_type[item.type](parser, &item);

	/*
	 * Handle special flags set during scanning.
	 */
	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
	    (hid->group == HID_GROUP_MULTITOUCH))
		hid->group = HID_GROUP_MULTITOUCH_WIN_8;

	/*
	 * Vendor specific handling
	 */
	switch (hid->vendor) {
	case USB_VENDOR_ID_WACOM:
		hid->group = HID_GROUP_WACOM;
		break;
	case USB_VENDOR_ID_SYNAPTICS:
		if (hid->group == HID_GROUP_GENERIC)
			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
				/*
				 * hid-rmi should take care of them,
				 * not hid-generic
				 */
				hid->group = HID_GROUP_RMI;
		break;
	}

	kfree(parser->collection_stack);
	vfree(parser);
	return 0;
}

/**
 * hid_parse_report - parse device report
 *
 * @hid: hid device
 * @start: report start
 * @size: report size
 *
 * Allocate the device report as read by the bus driver. This function should
 * only be called from parse() in ll drivers.
 */
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
{
	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
	if (!hid->dev_rdesc)
		return -ENOMEM;
	hid->dev_rsize = size;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_parse_report);

static const char * const hid_report_names[] = {
	"HID_INPUT_REPORT",
	"HID_OUTPUT_REPORT",
	"HID_FEATURE_REPORT",
};

/**
 * hid_validate_values - validate existing device report's value indexes
 *
 * @hid: hid device
 * @type: which report type to examine
 * @id: which report ID to examine (0 for first)
 * @field_index: which report field to examine
 * @report_counts: expected number of values
 *
 * Validate the number of values in a given field of a given report, after
 * parsing.
 */
struct hid_report *hid_validate_values(struct hid_device *hid,
				       enum hid_report_type type, unsigned int id,
				       unsigned int field_index,
				       unsigned int report_counts)
{
	struct hid_report *report;

	if (type > HID_FEATURE_REPORT) {
		hid_err(hid, "invalid HID report type %u\n", type);
		return NULL;
	}

	if (id >= HID_MAX_IDS) {
		hid_err(hid, "invalid HID report id %u\n", id);
		return NULL;
	}

	/*
	 * Explicitly not using hid_get_report() here since it depends on
	 * ->numbered being checked, which may not always be the case when
	 * drivers go to access report values.
	 */
	if (id == 0) {
		/*
		 * Validating on id 0 means we should examine the first
		 * report in the list.
		 */
		report = list_first_entry_or_null(
				&hid->report_enum[type].report_list,
				struct hid_report, list);
	} else {
		report = hid->report_enum[type].report_id_hash[id];
	}
	if (!report) {
		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
		return NULL;
	}
	if (report->maxfield <= field_index) {
		hid_err(hid, "not enough fields in %s %u\n",
			hid_report_names[type], id);
		return NULL;
	}
	if (report->field[field_index]->report_count < report_counts) {
		hid_err(hid, "not enough values in %s %u field %u\n",
			hid_report_names[type], id, field_index);
		return NULL;
	}
	return report;
}
EXPORT_SYMBOL_GPL(hid_validate_values);

static int hid_calculate_multiplier(struct hid_device *hid,
				    struct hid_field *multiplier)
{
	int m;
	__s32 v = *multiplier->value;
	__s32 lmin = multiplier->logical_minimum;
	__s32 lmax = multiplier->logical_maximum;
	__s32 pmin = multiplier->physical_minimum;
	__s32 pmax = multiplier->physical_maximum;

	/*
	 * "Because OS implementations will generally divide the control's
	 * reported count by the Effective Resolution Multiplier, designers
	 * should take care not to establish a potential Effective
	 * Resolution Multiplier of zero."
	 * HID Usage Table, v1.12, Section 4.3.1, p31
	 */
	if (lmax - lmin == 0)
		return 1;

	/*
	 * Handling the unit exponent is left as an exercise to whoever
	 * finds a device where that exponent is not 0.
	 */
	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
	if (unlikely(multiplier->unit_exponent != 0)) {
		hid_warn(hid,
			 "unsupported Resolution Multiplier unit exponent %d\n",
			 multiplier->unit_exponent);
	}

	/* There are no devices with an effective multiplier > 255 */
	if (unlikely(m == 0 || m > 255 || m < -255)) {
		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
		m = 1;
	}

	return m;
}
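
/*
 * Worked example for the computation above, assuming a typical wheel
 * multiplier field with logical range 0..1 and physical range 1..120:
 * with the feature value set to 1 the effective multiplier is
 * (1 - 0) / (1 - 0) * (120 - 1) + 1 = 120 (high-resolution scrolling);
 * with the value at 0 it is 0 / 1 * 119 + 1 = 1 (legacy behaviour).
 * Note that the division is integer division; multiplier fields typically
 * use a 0..1 logical range, so truncation is not a problem in practice.
 */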

static void hid_apply_multiplier_to_field(struct hid_device *hid,
					  struct hid_field *field,
					  struct hid_collection *multiplier_collection,
					  int effective_multiplier)
{
	struct hid_collection *collection;
	struct hid_usage *usage;
	int i;

	/*
	 * If multiplier_collection is NULL, the multiplier applies
	 * to all fields in the report.
	 * Otherwise, it is the Logical Collection the multiplier applies to
	 * but our field may be in a subcollection of that collection.
	 */
	for (i = 0; i < field->maxusage; i++) {
		usage = &field->usage[i];

		collection = &hid->collection[usage->collection_index];
		while (collection->parent_idx != -1 &&
		       collection != multiplier_collection)
			collection = &hid->collection[collection->parent_idx];

		if (collection->parent_idx != -1 ||
		    multiplier_collection == NULL)
			usage->resolution_multiplier = effective_multiplier;
	}
}

static void hid_apply_multiplier(struct hid_device *hid,
				 struct hid_field *multiplier)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_field *field;
	struct hid_collection *multiplier_collection;
	int effective_multiplier;
	int i;

	/*
	 * "The Resolution Multiplier control must be contained in the same
	 * Logical Collection as the control(s) to which it is to be applied.
	 * If no Resolution Multiplier is defined, then the Resolution
	 * Multiplier defaults to 1. If more than one control exists in a
	 * Logical Collection, the Resolution Multiplier is associated with
	 * all controls in the collection. If no Logical Collection is
	 * defined, the Resolution Multiplier is associated with all
	 * controls in the report."
	 * HID Usage Table, v1.12, Section 4.3.1, p30
	 *
	 * Thus, search from the current collection upwards until we find a
	 * logical collection. Then search all fields for that same parent
	 * collection. Those are the fields the multiplier applies to.
	 *
	 * If we have more than one multiplier, it will overwrite the
	 * applicable fields later.
	 */
	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
	while (multiplier_collection->parent_idx != -1 &&
	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];

	effective_multiplier = hid_calculate_multiplier(hid, multiplier);

	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			field = rep->field[i];
			hid_apply_multiplier_to_field(hid, field,
						      multiplier_collection,
						      effective_multiplier);
		}
	}
}

/**
 * hid_setup_resolution_multiplier - set up all resolution multipliers
 *
 * @hid: hid device
 *
 * Search for all Resolution Multiplier Feature Reports and apply their
 * value to all matching Input items. This only updates the internal struct
 * fields.
 *
 * The Resolution Multiplier is applied by the hardware. If the multiplier
 * is anything other than 1, the hardware will send pre-multiplied events
 * so that the same physical interaction generates an accumulated
 *	accumulated_value = value * multiplier
 * This may be achieved by sending
 * - "value * multiplier" for each event, or
 * - "value" but "multiplier" times as frequently, or
 * - a combination of the above
 * The only guarantee is that the same physical interaction always generates
 * an accumulated 'value * multiplier'.
 *
 * This function must be called before any event processing and after
 * any SetRequest to the Resolution Multiplier.
 */
void hid_setup_resolution_multiplier(struct hid_device *hid)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_usage *usage;
	int i, j;

	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			/* Ignore if report count is out of bounds. */
			if (rep->field[i]->report_count < 1)
				continue;

			for (j = 0; j < rep->field[i]->maxusage; j++) {
				usage = &rep->field[i]->usage[j];
				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
					hid_apply_multiplier(hid,
							     rep->field[i]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);

/**
 * hid_open_report - open a driver-specific device report
 *
 * @device: hid device
 *
 * Parse a report description into a hid_device structure. Reports are
 * enumerated, fields are attached to these reports.
 * 0 returned on success, otherwise nonzero error value.
 *
 * This function (or the equivalent hid_parse() macro) should only be
 * called from probe() in drivers, before starting the device.
 */
int hid_open_report(struct hid_device *device)
{
	struct hid_parser *parser;
	struct hid_item item;
	unsigned int size;
	__u8 *start;
	__u8 *buf;
	__u8 *end;
	__u8 *next;
	int ret;
	int i;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	if (WARN_ON(device->status & HID_STAT_PARSED))
		return -EBUSY;

	start = device->dev_rdesc;
	if (WARN_ON(!start))
		return -ENODEV;

	size = device->dev_rsize;

	buf = kmemdup(start, size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (device->driver->report_fixup)
		start = device->driver->report_fixup(device, buf, &size);
	else
		start = buf;

	start = kmemdup(start, size, GFP_KERNEL);
	kfree(buf);
	if (start == NULL)
		return -ENOMEM;

	device->rdesc = start;
	device->rsize = size;

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	parser->device = device;

	end = start + size;

	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
				     sizeof(struct hid_collection), GFP_KERNEL);
	if (!device->collection) {
		ret = -ENOMEM;
		goto err;
	}
	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
		device->collection[i].parent_idx = -1;

	ret = -EINVAL;
	while ((next = fetch_item(start, end, &item)) != NULL) {
		start = next;

		if (item.format != HID_ITEM_FORMAT_SHORT) {
			hid_err(device, "unexpected long global item\n");
			goto err;
		}

		if (dispatch_type[item.type](parser, &item)) {
			hid_err(device, "item %u %u %u %u parsing failed\n",
				item.format, (unsigned)item.size,
				(unsigned)item.type, (unsigned)item.tag);
			goto err;
		}

		if (start == end) {
			if (parser->collection_stack_ptr) {
				hid_err(device, "unbalanced collection at end of report description\n");
				goto err;
			}
			if (parser->local.delimiter_depth) {
				hid_err(device, "unbalanced delimiter at end of report description\n");
				goto err;
			}

			/*
			 * fetch initial values in case the device's
			 * default multiplier isn't the recommended 1
			 */
			hid_setup_resolution_multiplier(device);

			kfree(parser->collection_stack);
			vfree(parser);
			device->status |= HID_STAT_PARSED;

			return 0;
		}
	}

	hid_err(device, "item fetching failed at offset %u/%u\n",
		size - (unsigned int)(end - start), size);

err:
	kfree(parser->collection_stack);
alloc_err:
	vfree(parser);
	hid_close_report(device);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_open_report);

/*
 * Convert a signed n-bit integer to signed 32-bit integer. Common
 * cases are done through the compiler, the odd sizes have to be
 * done by hand.
 */
static s32 snto32(__u32 value, unsigned n)
{
	if (!value || !n)
		return 0;

	if (n > 32)
		n = 32;

	switch (n) {
	case 8:  return ((__s8)value);
	case 16: return ((__s16)value);
	case 32: return ((__s32)value);
	}
	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
}
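
/*
 * Example: for a 12-bit field, snto32(0xFFF, 12) sees the sign bit (bit 11)
 * set, ORs in ~0U << 12 and returns -1, while snto32(0x7FF, 12) has the sign
 * bit clear and returns 2047 unchanged.
 */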

s32 hid_snto32(__u32 value, unsigned n)
{
	return snto32(value, n);
}
EXPORT_SYMBOL_GPL(hid_snto32);

/*
 * Convert a signed 32-bit integer to a signed n-bit integer.
 */
static u32 s32ton(__s32 value, unsigned n)
{
	s32 a = value >> (n - 1);

	if (a && a != -1)
		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
	return value & ((1 << n) - 1);
}

/*
 * Extract/implement a data field from/to a little endian report (bit array).
 *
 * Code sort-of follows HID spec:
 *     http://www.usb.org/developers/hidpage/HID1_11.pdf
 *
 * While the USB HID spec allows unlimited length bit fields in "report
 * descriptors", most devices never use more than 16 bits.
 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
 */
static u32 __extract(u8 *report, unsigned offset, int n)
{
	unsigned int idx = offset / 8;
	unsigned int bit_nr = 0;
	unsigned int bit_shift = offset % 8;
	int bits_to_copy = 8 - bit_shift;
	u32 value = 0;
	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}

	return value & mask;
}
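
/*
 * Example: extracting n = 5 bits at offset = 6 spans two bytes: the first
 * pass takes bits 6-7 of report[0] (bit_shift = 6, bits_to_copy = 2), the
 * second pass ORs report[1] in at bit_nr = 2, and the final "& mask" with
 * (1U << 5) - 1 trims the result back down to 5 bits.
 */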

u32 hid_field_extract(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n)
{
	if (n > 32) {
		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
			      __func__, n, current->comm);
		n = 32;
	}

	return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);

/*
 * "implement" : set bits in a little endian bit stream.
 * Same concepts as "extract" (see comments above).
 * The data mangled in the bit stream remains in little endian
 * order the whole time. It makes more sense to talk about
 * endianness of register values by considering a register
 * a "cached" copy of the little endian bit stream.
 */
static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
	unsigned int idx = offset / 8;
	unsigned int bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;

	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}

	/* last nibble */
	if (n) {
		u8 bit_mask = ((1U << n) - 1);
		report[idx] &= ~(bit_mask << bit_shift);
		report[idx] |= value << bit_shift;
	}
}

static void implement(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n, u32 value)
{
	if (unlikely(n > 32)) {
		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
			 __func__, n, current->comm);
		n = 32;
	} else if (n < 32) {
		u32 m = (1U << n) - 1;

		if (unlikely(value > m)) {
			hid_warn(hid,
				 "%s() called with too large value %d (n: %d)! (%s)\n",
				 __func__, value, n, current->comm);
			WARN_ON(1);
			value &= m;
		}
	}

	__implement(report, offset, n, value);
}

/*
 * Search an array for a value.
 */
static int search(__s32 *array, __s32 value, unsigned n)
{
	while (n--) {
		if (*array++ == value)
			return 0;
	}
	return -1;
}
  1239. /**
  1240. * hid_match_report - check if driver's raw_event should be called
  1241. *
  1242. * @hid: hid device
  1243. * @report: hid report to match against
  1244. *
  1245. * compare hid->driver->report_table->report_type to report->type
  1246. */
  1247. static int hid_match_report(struct hid_device *hid, struct hid_report *report)
  1248. {
  1249. const struct hid_report_id *id = hid->driver->report_table;
  1250. if (!id) /* NULL means all */
  1251. return 1;
  1252. for (; id->report_type != HID_TERMINATOR; id++)
  1253. if (id->report_type == HID_ANY_ID ||
  1254. id->report_type == report->type)
  1255. return 1;
  1256. return 0;
  1257. }
  1258. /**
  1259. * hid_match_usage - check if driver's event should be called
  1260. *
  1261. * @hid: hid device
  1262. * @usage: usage to match against
  1263. *
  1264. * compare hid->driver->usage_table->usage_{type,code} to
  1265. * usage->usage_{type,code}
  1266. */
  1267. static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
  1268. {
  1269. const struct hid_usage_id *id = hid->driver->usage_table;
  1270. if (!id) /* NULL means all */
  1271. return 1;
  1272. for (; id->usage_type != HID_ANY_ID - 1; id++)
  1273. if ((id->usage_hid == HID_ANY_ID ||
  1274. id->usage_hid == usage->hid) &&
  1275. (id->usage_type == HID_ANY_ID ||
  1276. id->usage_type == usage->type) &&
  1277. (id->usage_code == HID_ANY_ID ||
  1278. id->usage_code == usage->code))
  1279. return 1;
  1280. return 0;
  1281. }
  1282. static void hid_process_event(struct hid_device *hid, struct hid_field *field,
  1283. struct hid_usage *usage, __s32 value, int interrupt)
  1284. {
  1285. struct hid_driver *hdrv = hid->driver;
  1286. int ret;
  1287. if (!list_empty(&hid->debug_list))
  1288. hid_dump_input(hid, usage, value);
  1289. if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
  1290. ret = hdrv->event(hid, field, usage, value);
  1291. if (ret != 0) {
  1292. if (ret < 0)
  1293. hid_err(hid, "%s's event failed with %d\n",
  1294. hdrv->name, ret);
  1295. return;
  1296. }
  1297. }
  1298. if (hid->claimed & HID_CLAIMED_INPUT)
  1299. hidinput_hid_event(hid, field, usage, value);
  1300. if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
  1301. hid->hiddev_hid_event(hid, field, usage, value);
  1302. }
  1303. /*
  1304. * Checks if the given value is valid within this field
  1305. */
  1306. static inline int hid_array_value_is_valid(struct hid_field *field,
  1307. __s32 value)
  1308. {
  1309. __s32 min = field->logical_minimum;
  1310. /*
  1311. * Value needs to be between logical min and max, and
  1312. * (value - min) is used as an index in the usage array.
  1313. * This array is of size field->maxusage
  1314. */
  1315. return value >= min &&
  1316. value <= field->logical_maximum &&
  1317. value - min < field->maxusage;
  1318. }
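/*
 * For example (illustrative values): with logical_minimum 1,
 * logical_maximum 6 and maxusage 6, an incoming value of 4 is valid and
 * selects field->usage[3], while 0 and 7 are both rejected.
 */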
  1319. /*
  1320. * Fetch the field from the data. The field content is stored for next
  1321. * report processing (we do differential reporting to the layer).
  1322. */
  1323. static void hid_input_fetch_field(struct hid_device *hid,
  1324. struct hid_field *field,
  1325. __u8 *data)
  1326. {
  1327. unsigned n;
  1328. unsigned count = field->report_count;
  1329. unsigned offset = field->report_offset;
  1330. unsigned size = field->report_size;
  1331. __s32 min = field->logical_minimum;
  1332. __s32 *value;
  1333. value = field->new_value;
  1334. memset(value, 0, count * sizeof(__s32));
  1335. field->ignored = false;
  1336. for (n = 0; n < count; n++) {
  1337. value[n] = min < 0 ?
  1338. snto32(hid_field_extract(hid, data, offset + n * size,
  1339. size), size) :
  1340. hid_field_extract(hid, data, offset + n * size, size);
  1341. /* Ignore report if ErrorRollOver */
  1342. if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
  1343. hid_array_value_is_valid(field, value[n]) &&
  1344. field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
  1345. field->ignored = true;
  1346. return;
  1347. }
  1348. }
  1349. }
  1350. /*
  1351. * Process a received variable field.
  1352. */
  1353. static void hid_input_var_field(struct hid_device *hid,
  1354. struct hid_field *field,
  1355. int interrupt)
  1356. {
  1357. unsigned int count = field->report_count;
  1358. __s32 *value = field->new_value;
  1359. unsigned int n;
  1360. for (n = 0; n < count; n++)
  1361. hid_process_event(hid,
  1362. field,
  1363. &field->usage[n],
  1364. value[n],
  1365. interrupt);
  1366. memcpy(field->value, value, count * sizeof(__s32));
  1367. }
  1368. /*
  1369. * Process a received array field. The field content is stored for
  1370. * next report processing (we do differential reporting to the layer).
  1371. */
  1372. static void hid_input_array_field(struct hid_device *hid,
  1373. struct hid_field *field,
  1374. int interrupt)
  1375. {
  1376. unsigned int n;
  1377. unsigned int count = field->report_count;
  1378. __s32 min = field->logical_minimum;
  1379. __s32 *value;
  1380. value = field->new_value;
  1381. /* ErrorRollOver */
  1382. if (field->ignored)
  1383. return;
  1384. for (n = 0; n < count; n++) {
  1385. if (hid_array_value_is_valid(field, field->value[n]) &&
  1386. search(value, field->value[n], count))
  1387. hid_process_event(hid,
  1388. field,
  1389. &field->usage[field->value[n] - min],
  1390. 0,
  1391. interrupt);
  1392. if (hid_array_value_is_valid(field, value[n]) &&
  1393. search(field->value, value[n], count))
  1394. hid_process_event(hid,
  1395. field,
  1396. &field->usage[value[n] - min],
  1397. 1,
  1398. interrupt);
  1399. }
  1400. memcpy(field->value, value, count * sizeof(__s32));
  1401. }
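/*
 * Example (illustrative): for a keyboard array field with logical_minimum 0,
 * a previous state of { 0x04, 0x05 } ('a', 'b') and a new state of
 * { 0x05, 0x06 } ('b', 'c') generate a release event for usage[0x04] and a
 * press event for usage[0x06]; 0x05 is present in both states and produces
 * no event.
 */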
  1402. /*
  1403. * Analyse a received report, and fetch the data from it. The field
  1404. * content is stored for next report processing (we do differential
  1405. * reporting to the layer).
  1406. */
  1407. static void hid_process_report(struct hid_device *hid,
  1408. struct hid_report *report,
  1409. __u8 *data,
  1410. int interrupt)
  1411. {
  1412. unsigned int a;
  1413. struct hid_field_entry *entry;
  1414. struct hid_field *field;
  1415. /* first retrieve all incoming values in data */
  1416. for (a = 0; a < report->maxfield; a++)
  1417. hid_input_fetch_field(hid, report->field[a], data);
  1418. if (!list_empty(&report->field_entry_list)) {
  1419. /* INPUT_REPORT, we have a priority list of fields */
  1420. list_for_each_entry(entry,
  1421. &report->field_entry_list,
  1422. list) {
  1423. field = entry->field;
  1424. if (field->flags & HID_MAIN_ITEM_VARIABLE)
  1425. hid_process_event(hid,
  1426. field,
  1427. &field->usage[entry->index],
  1428. field->new_value[entry->index],
  1429. interrupt);
  1430. else
  1431. hid_input_array_field(hid, field, interrupt);
  1432. }
  1433. /* we need to do the memcpy at the end for var items */
  1434. for (a = 0; a < report->maxfield; a++) {
  1435. field = report->field[a];
  1436. if (field->flags & HID_MAIN_ITEM_VARIABLE)
  1437. memcpy(field->value, field->new_value,
  1438. field->report_count * sizeof(__s32));
  1439. }
  1440. } else {
  1441. /* FEATURE_REPORT, regular processing */
  1442. for (a = 0; a < report->maxfield; a++) {
  1443. field = report->field[a];
  1444. if (field->flags & HID_MAIN_ITEM_VARIABLE)
  1445. hid_input_var_field(hid, field, interrupt);
  1446. else
  1447. hid_input_array_field(hid, field, interrupt);
  1448. }
  1449. }
  1450. }
  1451. /*
  1452. * Insert a given usage_index in a field in the list
  1453. * of processed usages in the report.
  1454. *
  1455. * The elements of lower priority score are processed
  1456. * first.
  1457. */
  1458. static void __hid_insert_field_entry(struct hid_device *hid,
  1459. struct hid_report *report,
  1460. struct hid_field_entry *entry,
  1461. struct hid_field *field,
  1462. unsigned int usage_index)
  1463. {
  1464. struct hid_field_entry *next;
  1465. entry->field = field;
  1466. entry->index = usage_index;
  1467. entry->priority = field->usages_priorities[usage_index];
  1468. /* insert the element at the correct position */
  1469. list_for_each_entry(next,
  1470. &report->field_entry_list,
  1471. list) {
  1472. /*
  1473. * the priority of our element is strictly higher
  1474. * than the next one, insert it before
  1475. */
  1476. if (entry->priority > next->priority) {
  1477. list_add_tail(&entry->list, &next->list);
  1478. return;
  1479. }
  1480. }
  1481. /* lowest priority score: insert at the end */
  1482. list_add_tail(&entry->list, &report->field_entry_list);
  1483. }
  1484. static void hid_report_process_ordering(struct hid_device *hid,
  1485. struct hid_report *report)
  1486. {
  1487. struct hid_field *field;
  1488. struct hid_field_entry *entries;
  1489. unsigned int a, u, usages;
  1490. unsigned int count = 0;
  1491. /* count the number of individual fields in the report */
  1492. for (a = 0; a < report->maxfield; a++) {
  1493. field = report->field[a];
  1494. if (field->flags & HID_MAIN_ITEM_VARIABLE)
  1495. count += field->report_count;
  1496. else
  1497. count++;
  1498. }
  1499. /* allocate the memory to process the fields */
  1500. entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
  1501. if (!entries)
  1502. return;
  1503. report->field_entries = entries;
  1504. /*
  1505. * walk through all fields in the report and
  1506. * store them by priority order in report->field_entry_list
  1507. *
  1508. * - Var elements are individualized (field + usage_index)
 * - Arrays are taken as one, we cannot choose an order for them
  1510. */
  1511. usages = 0;
  1512. for (a = 0; a < report->maxfield; a++) {
  1513. field = report->field[a];
  1514. if (field->flags & HID_MAIN_ITEM_VARIABLE) {
  1515. for (u = 0; u < field->report_count; u++) {
  1516. __hid_insert_field_entry(hid, report,
  1517. &entries[usages],
  1518. field, u);
  1519. usages++;
  1520. }
  1521. } else {
  1522. __hid_insert_field_entry(hid, report, &entries[usages],
  1523. field, 0);
  1524. usages++;
  1525. }
  1526. }
  1527. }
  1528. static void hid_process_ordering(struct hid_device *hid)
  1529. {
  1530. struct hid_report *report;
  1531. struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
  1532. list_for_each_entry(report, &report_enum->report_list, list)
  1533. hid_report_process_ordering(hid, report);
  1534. }
  1535. /*
  1536. * Output the field into the report.
  1537. */
  1538. static void hid_output_field(const struct hid_device *hid,
  1539. struct hid_field *field, __u8 *data)
  1540. {
  1541. unsigned count = field->report_count;
  1542. unsigned offset = field->report_offset;
  1543. unsigned size = field->report_size;
  1544. unsigned n;
  1545. for (n = 0; n < count; n++) {
  1546. if (field->logical_minimum < 0) /* signed values */
  1547. implement(hid, data, offset + n * size, size,
  1548. s32ton(field->value[n], size));
  1549. else /* unsigned values */
  1550. implement(hid, data, offset + n * size, size,
  1551. field->value[n]);
  1552. }
  1553. }
  1554. /*
  1555. * Compute the size of a report.
  1556. */
  1557. static size_t hid_compute_report_size(struct hid_report *report)
  1558. {
  1559. if (report->size)
  1560. return ((report->size - 1) >> 3) + 1;
  1561. return 0;
  1562. }
  1563. /*
  1564. * Create a report. 'data' has to be allocated using
  1565. * hid_alloc_report_buf() so that it has proper size.
  1566. */
  1567. void hid_output_report(struct hid_report *report, __u8 *data)
  1568. {
  1569. unsigned n;
  1570. if (report->id > 0)
  1571. *data++ = report->id;
  1572. memset(data, 0, hid_compute_report_size(report));
  1573. for (n = 0; n < report->maxfield; n++)
  1574. hid_output_field(report->device, report->field[n], data);
  1575. }
  1576. EXPORT_SYMBOL_GPL(hid_output_report);
  1577. /*
  1578. * Allocator for buffer that is going to be passed to hid_output_report()
  1579. */
  1580. u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
  1581. {
  1582. /*
  1583. * 7 extra bytes are necessary to achieve proper functionality
  1584. * of implement() working on 8 byte chunks
  1585. */
  1586. u32 len = hid_report_len(report) + 7;
  1587. return kmalloc(len, flags);
  1588. }
  1589. EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
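/*
 * Example (illustrative, hypothetical driver code): serializing a report and
 * pushing it to the device over the raw channel.  This mirrors what
 * __hid_request() below does internally; error handling is trimmed.
 */
#if 0	/* usage sketch, not compiled */
static int example_send_report(struct hid_device *hdev,
			       struct hid_report *report)
{
	u8 *buf;
	int ret;

	buf = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hid_output_report(report, buf);
	ret = hid_hw_raw_request(hdev, report->id, buf, hid_report_len(report),
				 report->type, HID_REQ_SET_REPORT);

	kfree(buf);
	return ret < 0 ? ret : 0;
}
#endif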
  1590. /*
  1591. * Set a field value. The report this field belongs to has to be
  1592. * created and transferred to the device, to set this value in the
  1593. * device.
  1594. */
  1595. int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
  1596. {
  1597. unsigned size;
  1598. if (!field)
  1599. return -1;
  1600. size = field->report_size;
  1601. hid_dump_input(field->report->device, field->usage + offset, value);
  1602. if (offset >= field->report_count) {
  1603. hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
  1604. offset, field->report_count);
  1605. return -1;
  1606. }
  1607. if (field->logical_minimum < 0) {
  1608. if (value != snto32(s32ton(value, size), size)) {
  1609. hid_err(field->report->device, "value %d is out of range\n", value);
  1610. return -1;
  1611. }
  1612. }
  1613. field->value[offset] = value;
  1614. return 0;
  1615. }
  1616. EXPORT_SYMBOL_GPL(hid_set_field);
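/*
 * Example (illustrative, hypothetical driver code): setting the first value
 * of an output report's first field and asking the transport to send it.
 * The lookup assumes an un-numbered output report; real drivers usually
 * walk report_enum[HID_OUTPUT_REPORT].report_list instead.
 */
#if 0	/* usage sketch, not compiled */
static void example_set_first_field(struct hid_device *hdev)
{
	struct hid_report *report;

	report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[0];
	if (!report || report->maxfield < 1)
		return;

	hid_set_field(report->field[0], 0, 1);
	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}
#endif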
  1617. static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
  1618. const u8 *data)
  1619. {
  1620. struct hid_report *report;
  1621. unsigned int n = 0; /* Normally report number is 0 */
  1622. /* Device uses numbered reports, data[0] is report number */
  1623. if (report_enum->numbered)
  1624. n = *data;
  1625. report = report_enum->report_id_hash[n];
  1626. if (report == NULL)
  1627. dbg_hid("undefined report_id %u received\n", n);
  1628. return report;
  1629. }
  1630. /*
  1631. * Implement a generic .request() callback, using .raw_request()
  1632. * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
  1633. */
  1634. int __hid_request(struct hid_device *hid, struct hid_report *report,
  1635. enum hid_class_request reqtype)
  1636. {
  1637. char *buf;
  1638. int ret;
  1639. u32 len;
  1640. buf = hid_alloc_report_buf(report, GFP_KERNEL);
  1641. if (!buf)
  1642. return -ENOMEM;
  1643. len = hid_report_len(report);
  1644. if (reqtype == HID_REQ_SET_REPORT)
  1645. hid_output_report(report, buf);
  1646. ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
  1647. report->type, reqtype);
  1648. if (ret < 0) {
  1649. dbg_hid("unable to complete request: %d\n", ret);
  1650. goto out;
  1651. }
  1652. if (reqtype == HID_REQ_GET_REPORT)
  1653. hid_input_report(hid, report->type, buf, ret, 0);
  1654. ret = 0;
  1655. out:
  1656. kfree(buf);
  1657. return ret;
  1658. }
  1659. EXPORT_SYMBOL_GPL(__hid_request);
  1660. int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
  1661. int interrupt)
  1662. {
  1663. struct hid_report_enum *report_enum = hid->report_enum + type;
  1664. struct hid_report *report;
  1665. struct hid_driver *hdrv;
  1666. int max_buffer_size = HID_MAX_BUFFER_SIZE;
  1667. u32 rsize, csize = size;
  1668. u8 *cdata = data;
  1669. int ret = 0;
  1670. report = hid_get_report(report_enum, data);
  1671. if (!report)
  1672. goto out;
  1673. if (report_enum->numbered) {
  1674. cdata++;
  1675. csize--;
  1676. }
  1677. rsize = hid_compute_report_size(report);
  1678. if (IS_ENABLED(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
  1679. max_buffer_size = UHID_DATA_MAX;
  1680. if (report_enum->numbered && rsize >= max_buffer_size)
  1681. rsize = max_buffer_size - 1;
  1682. else if (rsize > max_buffer_size)
  1683. rsize = max_buffer_size;
  1684. if (csize < rsize) {
  1685. dbg_hid("report %d is too short, (%d < %d)\n", report->id,
  1686. csize, rsize);
  1687. memset(cdata + csize, 0, rsize - csize);
  1688. }
  1689. if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
  1690. hid->hiddev_report_event(hid, report);
  1691. if (hid->claimed & HID_CLAIMED_HIDRAW) {
  1692. ret = hidraw_report_event(hid, data, size);
  1693. if (ret)
  1694. goto out;
  1695. }
  1696. if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
  1697. hid_process_report(hid, report, cdata, interrupt);
  1698. hdrv = hid->driver;
  1699. if (hdrv && hdrv->report)
  1700. hdrv->report(hid, report);
  1701. }
  1702. if (hid->claimed & HID_CLAIMED_INPUT)
  1703. hidinput_report_event(hid, report);
  1704. out:
  1705. return ret;
  1706. }
  1707. EXPORT_SYMBOL_GPL(hid_report_raw_event);
  1708. /**
  1709. * hid_input_report - report data from lower layer (usb, bt...)
  1710. *
  1711. * @hid: hid device
  1712. * @type: HID report type (HID_*_REPORT)
  1713. * @data: report contents
  1714. * @size: size of data parameter
  1715. * @interrupt: distinguish between interrupt and control transfers
  1716. *
  1717. * This is data entry for lower layers.
  1718. */
  1719. int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
  1720. int interrupt)
  1721. {
  1722. struct hid_report_enum *report_enum;
  1723. struct hid_driver *hdrv;
  1724. struct hid_report *report;
  1725. int ret = 0;
  1726. if (!hid)
  1727. return -ENODEV;
  1728. if (down_trylock(&hid->driver_input_lock))
  1729. return -EBUSY;
  1730. if (!hid->driver) {
  1731. ret = -ENODEV;
  1732. goto unlock;
  1733. }
  1734. report_enum = hid->report_enum + type;
  1735. hdrv = hid->driver;
  1736. if (!size) {
  1737. dbg_hid("empty report\n");
  1738. ret = -1;
  1739. goto unlock;
  1740. }
  1741. /* Avoid unnecessary overhead if debugfs is disabled */
  1742. if (!list_empty(&hid->debug_list))
  1743. hid_dump_report(hid, type, data, size);
  1744. report = hid_get_report(report_enum, data);
  1745. if (!report) {
  1746. ret = -1;
  1747. goto unlock;
  1748. }
  1749. if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
  1750. ret = hdrv->raw_event(hid, report, data, size);
  1751. if (ret < 0)
  1752. goto unlock;
  1753. }
  1754. ret = hid_report_raw_event(hid, type, data, size, interrupt);
  1755. unlock:
  1756. up(&hid->driver_input_lock);
  1757. return ret;
  1758. }
  1759. EXPORT_SYMBOL_GPL(hid_input_report);
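/*
 * Example (illustrative): a transport driver feeds completed interrupt-IN
 * transfers into the core like this; "urb_data" and "urb_len" are
 * hypothetical names for the received buffer and its length.
 */
#if 0	/* usage sketch, not compiled */
	/* from the transport's interrupt-IN completion handler */
	hid_input_report(hdev, HID_INPUT_REPORT, urb_data, urb_len, 1);
#endif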
  1760. bool hid_match_one_id(const struct hid_device *hdev,
  1761. const struct hid_device_id *id)
  1762. {
  1763. return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
  1764. (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
  1765. (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
  1766. (id->product == HID_ANY_ID || id->product == hdev->product);
  1767. }
  1768. const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
  1769. const struct hid_device_id *id)
  1770. {
  1771. for (; id->bus; id++)
  1772. if (hid_match_one_id(hdev, id))
  1773. return id;
  1774. return NULL;
  1775. }
  1776. EXPORT_SYMBOL_GPL(hid_match_id);
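/*
 * Example (illustrative): a driver id table as consumed by hid_match_id().
 * The vendor/product numbers are made up; the empty entry terminates the
 * table (hid_match_id() stops when id->bus is 0).
 */
#if 0	/* usage sketch, not compiled */
static const struct hid_device_id example_id_table[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },
	{ HID_BLUETOOTH_DEVICE(0x1234, 0x9abc) },
	{ }
};
MODULE_DEVICE_TABLE(hid, example_id_table);
#endif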
  1777. static const struct hid_device_id hid_hiddev_list[] = {
  1778. { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
  1779. { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
  1780. { }
  1781. };
  1782. static bool hid_hiddev(struct hid_device *hdev)
  1783. {
  1784. return !!hid_match_id(hdev, hid_hiddev_list);
  1785. }
  1786. static ssize_t
  1787. read_report_descriptor(struct file *filp, struct kobject *kobj,
  1788. struct bin_attribute *attr,
  1789. char *buf, loff_t off, size_t count)
  1790. {
  1791. struct device *dev = kobj_to_dev(kobj);
  1792. struct hid_device *hdev = to_hid_device(dev);
  1793. if (off >= hdev->rsize)
  1794. return 0;
  1795. if (off + count > hdev->rsize)
  1796. count = hdev->rsize - off;
  1797. memcpy(buf, hdev->rdesc + off, count);
  1798. return count;
  1799. }
  1800. static ssize_t
  1801. show_country(struct device *dev, struct device_attribute *attr,
  1802. char *buf)
  1803. {
  1804. struct hid_device *hdev = to_hid_device(dev);
  1805. return sprintf(buf, "%02x\n", hdev->country & 0xff);
  1806. }
  1807. static struct bin_attribute dev_bin_attr_report_desc = {
  1808. .attr = { .name = "report_descriptor", .mode = 0444 },
  1809. .read = read_report_descriptor,
  1810. .size = HID_MAX_DESCRIPTOR_SIZE,
  1811. };
  1812. static const struct device_attribute dev_attr_country = {
  1813. .attr = { .name = "country", .mode = 0444 },
  1814. .show = show_country,
  1815. };
  1816. int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
  1817. {
  1818. static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
  1819. "Joystick", "Gamepad", "Keyboard", "Keypad",
  1820. "Multi-Axis Controller"
  1821. };
  1822. const char *type, *bus;
  1823. char buf[64] = "";
  1824. unsigned int i;
  1825. int len;
  1826. int ret;
  1827. if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
  1828. connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
  1829. if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
  1830. connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
  1831. if (hdev->bus != BUS_USB)
  1832. connect_mask &= ~HID_CONNECT_HIDDEV;
  1833. if (hid_hiddev(hdev))
  1834. connect_mask |= HID_CONNECT_HIDDEV_FORCE;
  1835. if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
  1836. connect_mask & HID_CONNECT_HIDINPUT_FORCE))
  1837. hdev->claimed |= HID_CLAIMED_INPUT;
  1838. if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
  1839. !hdev->hiddev_connect(hdev,
  1840. connect_mask & HID_CONNECT_HIDDEV_FORCE))
  1841. hdev->claimed |= HID_CLAIMED_HIDDEV;
  1842. if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
  1843. hdev->claimed |= HID_CLAIMED_HIDRAW;
  1844. if (connect_mask & HID_CONNECT_DRIVER)
  1845. hdev->claimed |= HID_CLAIMED_DRIVER;
  1846. /* Drivers with the ->raw_event callback set are not required to connect
  1847. * to any other listener. */
  1848. if (!hdev->claimed && !hdev->driver->raw_event) {
  1849. hid_err(hdev, "device has no listeners, quitting\n");
  1850. return -ENODEV;
  1851. }
  1852. hid_process_ordering(hdev);
  1853. if ((hdev->claimed & HID_CLAIMED_INPUT) &&
  1854. (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
  1855. hdev->ff_init(hdev);
  1856. len = 0;
  1857. if (hdev->claimed & HID_CLAIMED_INPUT)
  1858. len += sprintf(buf + len, "input");
  1859. if (hdev->claimed & HID_CLAIMED_HIDDEV)
  1860. len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
  1861. ((struct hiddev *)hdev->hiddev)->minor);
  1862. if (hdev->claimed & HID_CLAIMED_HIDRAW)
  1863. len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
  1864. ((struct hidraw *)hdev->hidraw)->minor);
  1865. type = "Device";
  1866. for (i = 0; i < hdev->maxcollection; i++) {
  1867. struct hid_collection *col = &hdev->collection[i];
  1868. if (col->type == HID_COLLECTION_APPLICATION &&
  1869. (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
  1870. (col->usage & 0xffff) < ARRAY_SIZE(types)) {
  1871. type = types[col->usage & 0xffff];
  1872. break;
  1873. }
  1874. }
  1875. switch (hdev->bus) {
  1876. case BUS_USB:
  1877. bus = "USB";
  1878. break;
  1879. case BUS_BLUETOOTH:
  1880. bus = "BLUETOOTH";
  1881. break;
  1882. case BUS_I2C:
  1883. bus = "I2C";
  1884. break;
  1885. case BUS_VIRTUAL:
  1886. bus = "VIRTUAL";
  1887. break;
  1888. case BUS_INTEL_ISHTP:
  1889. case BUS_AMD_SFH:
  1890. bus = "SENSOR HUB";
  1891. break;
  1892. default:
  1893. bus = "<UNKNOWN>";
  1894. }
  1895. ret = device_create_file(&hdev->dev, &dev_attr_country);
  1896. if (ret)
  1897. hid_warn(hdev,
  1898. "can't create sysfs country code attribute err: %d\n", ret);
  1899. hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
  1900. buf, bus, hdev->version >> 8, hdev->version & 0xff,
  1901. type, hdev->name, hdev->phys);
  1902. return 0;
  1903. }
  1904. EXPORT_SYMBOL_GPL(hid_connect);
  1905. void hid_disconnect(struct hid_device *hdev)
  1906. {
  1907. device_remove_file(&hdev->dev, &dev_attr_country);
  1908. if (hdev->claimed & HID_CLAIMED_INPUT)
  1909. hidinput_disconnect(hdev);
  1910. if (hdev->claimed & HID_CLAIMED_HIDDEV)
  1911. hdev->hiddev_disconnect(hdev);
  1912. if (hdev->claimed & HID_CLAIMED_HIDRAW)
  1913. hidraw_disconnect(hdev);
  1914. hdev->claimed = 0;
  1915. }
  1916. EXPORT_SYMBOL_GPL(hid_disconnect);
  1917. /**
  1918. * hid_hw_start - start underlying HW
  1919. * @hdev: hid device
  1920. * @connect_mask: which outputs to connect, see HID_CONNECT_*
  1921. *
 * Call this in probe function *after* hid_parse. This will set up HW
 * buffers and start the device (if not deferred to device open).
 * hid_hw_stop must be called if this was successful.
  1925. */
  1926. int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
  1927. {
  1928. int error;
  1929. error = hdev->ll_driver->start(hdev);
  1930. if (error)
  1931. return error;
  1932. if (connect_mask) {
  1933. error = hid_connect(hdev, connect_mask);
  1934. if (error) {
  1935. hdev->ll_driver->stop(hdev);
  1936. return error;
  1937. }
  1938. }
  1939. return 0;
  1940. }
  1941. EXPORT_SYMBOL_GPL(hid_hw_start);
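/*
 * Example (illustrative, hypothetical driver): the usual probe() sequence
 * built on top of hid_hw_start(); hid_parse() is the inline wrapper around
 * hid_open_report().
 */
#if 0	/* usage sketch, not compiled */
static int example_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
#endif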
  1942. /**
  1943. * hid_hw_stop - stop underlying HW
  1944. * @hdev: hid device
  1945. *
  1946. * This is usually called from remove function or from probe when something
 * failed and hid_hw_start was called already.
  1948. */
  1949. void hid_hw_stop(struct hid_device *hdev)
  1950. {
  1951. hid_disconnect(hdev);
  1952. hdev->ll_driver->stop(hdev);
  1953. }
  1954. EXPORT_SYMBOL_GPL(hid_hw_stop);
  1955. /**
  1956. * hid_hw_open - signal underlying HW to start delivering events
  1957. * @hdev: hid device
  1958. *
  1959. * Tell underlying HW to start delivering events from the device.
  1960. * This function should be called sometime after successful call
 * to hid_hw_start().
  1962. */
  1963. int hid_hw_open(struct hid_device *hdev)
  1964. {
  1965. int ret;
  1966. ret = mutex_lock_killable(&hdev->ll_open_lock);
  1967. if (ret)
  1968. return ret;
  1969. if (!hdev->ll_open_count++) {
  1970. ret = hdev->ll_driver->open(hdev);
  1971. if (ret)
  1972. hdev->ll_open_count--;
  1973. }
  1974. mutex_unlock(&hdev->ll_open_lock);
  1975. return ret;
  1976. }
  1977. EXPORT_SYMBOL_GPL(hid_hw_open);
  1978. /**
 * hid_hw_close - signal underlying HW to stop delivering events
 *
 * @hdev: hid device
 *
 * This function indicates that we are not interested in the events
 * from this device anymore. Delivery of events may or may not stop,
 * depending on the number of users still outstanding.
  1986. */
  1987. void hid_hw_close(struct hid_device *hdev)
  1988. {
  1989. mutex_lock(&hdev->ll_open_lock);
  1990. if (!--hdev->ll_open_count)
  1991. hdev->ll_driver->close(hdev);
  1992. mutex_unlock(&hdev->ll_open_lock);
  1993. }
  1994. EXPORT_SYMBOL_GPL(hid_hw_close);
  1995. /**
  1996. * hid_hw_request - send report request to device
  1997. *
  1998. * @hdev: hid device
  1999. * @report: report to send
  2000. * @reqtype: hid request type
  2001. */
  2002. void hid_hw_request(struct hid_device *hdev,
  2003. struct hid_report *report, enum hid_class_request reqtype)
  2004. {
  2005. if (hdev->ll_driver->request)
  2006. return hdev->ll_driver->request(hdev, report, reqtype);
  2007. __hid_request(hdev, report, reqtype);
  2008. }
  2009. EXPORT_SYMBOL_GPL(hid_hw_request);
  2010. /**
  2011. * hid_hw_raw_request - send report request to device
  2012. *
  2013. * @hdev: hid device
  2014. * @reportnum: report ID
  2015. * @buf: in/out data to transfer
  2016. * @len: length of buf
  2017. * @rtype: HID report type
  2018. * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
  2019. *
  2020. * Return: count of data transferred, negative if error
  2021. *
  2022. * Same behavior as hid_hw_request, but with raw buffers instead.
  2023. */
  2024. int hid_hw_raw_request(struct hid_device *hdev,
  2025. unsigned char reportnum, __u8 *buf,
  2026. size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
  2027. {
  2028. unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
  2029. if (IS_ENABLED(CONFIG_UHID) && hdev->ll_driver == &uhid_hid_driver)
  2030. max_buffer_size = UHID_DATA_MAX;
  2031. if (len < 1 || len > max_buffer_size || !buf)
  2032. return -EINVAL;
  2033. return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
  2034. rtype, reqtype);
  2035. }
  2036. EXPORT_SYMBOL_GPL(hid_hw_raw_request);
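/*
 * Example (illustrative): fetching a feature report into a kmalloc'ed
 * buffer.  The report id (1) and length (8) are made-up values; the buffer
 * must be heap-allocated because some transports DMA directly into it.
 */
#if 0	/* usage sketch, not compiled */
static int example_get_feature(struct hid_device *hdev)
{
	u8 *buf;
	int ret;

	buf = kmalloc(8, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, 1, buf, 8,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		hid_err(hdev, "feature report read failed: %d\n", ret);

	kfree(buf);
	return ret < 0 ? ret : 0;
}
#endif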
  2037. /**
  2038. * hid_hw_output_report - send output report to device
  2039. *
  2040. * @hdev: hid device
  2041. * @buf: raw data to transfer
  2042. * @len: length of buf
  2043. *
  2044. * Return: count of data transferred, negative if error
  2045. */
  2046. int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
  2047. {
  2048. unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
  2049. if (IS_ENABLED(CONFIG_UHID) && hdev->ll_driver == &uhid_hid_driver)
  2050. max_buffer_size = UHID_DATA_MAX;
  2051. if (len < 1 || len > max_buffer_size || !buf)
  2052. return -EINVAL;
  2053. if (hdev->ll_driver->output_report)
  2054. return hdev->ll_driver->output_report(hdev, buf, len);
  2055. return -ENOSYS;
  2056. }
  2057. EXPORT_SYMBOL_GPL(hid_hw_output_report);
  2058. #ifdef CONFIG_PM
  2059. int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
  2060. {
  2061. if (hdev->driver && hdev->driver->suspend)
  2062. return hdev->driver->suspend(hdev, state);
  2063. return 0;
  2064. }
  2065. EXPORT_SYMBOL_GPL(hid_driver_suspend);
  2066. int hid_driver_reset_resume(struct hid_device *hdev)
  2067. {
  2068. if (hdev->driver && hdev->driver->reset_resume)
  2069. return hdev->driver->reset_resume(hdev);
  2070. return 0;
  2071. }
  2072. EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
  2073. int hid_driver_resume(struct hid_device *hdev)
  2074. {
  2075. if (hdev->driver && hdev->driver->resume)
  2076. return hdev->driver->resume(hdev);
  2077. return 0;
  2078. }
  2079. EXPORT_SYMBOL_GPL(hid_driver_resume);
  2080. #endif /* CONFIG_PM */
  2081. struct hid_dynid {
  2082. struct list_head list;
  2083. struct hid_device_id id;
  2084. };
  2085. /**
  2086. * new_id_store - add a new HID device ID to this driver and re-probe devices
  2087. * @drv: target device driver
  2088. * @buf: buffer for scanning device ID data
  2089. * @count: input size
  2090. *
  2091. * Adds a new dynamic hid device ID to this driver,
  2092. * and causes the driver to probe for all devices again.
  2093. */
  2094. static ssize_t new_id_store(struct device_driver *drv, const char *buf,
  2095. size_t count)
  2096. {
  2097. struct hid_driver *hdrv = to_hid_driver(drv);
  2098. struct hid_dynid *dynid;
  2099. __u32 bus, vendor, product;
  2100. unsigned long driver_data = 0;
  2101. int ret;
  2102. ret = sscanf(buf, "%x %x %x %lx",
  2103. &bus, &vendor, &product, &driver_data);
  2104. if (ret < 3)
  2105. return -EINVAL;
  2106. dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
  2107. if (!dynid)
  2108. return -ENOMEM;
  2109. dynid->id.bus = bus;
  2110. dynid->id.group = HID_GROUP_ANY;
  2111. dynid->id.vendor = vendor;
  2112. dynid->id.product = product;
  2113. dynid->id.driver_data = driver_data;
  2114. spin_lock(&hdrv->dyn_lock);
  2115. list_add_tail(&dynid->list, &hdrv->dyn_list);
  2116. spin_unlock(&hdrv->dyn_lock);
  2117. ret = driver_attach(&hdrv->driver);
  2118. return ret ? : count;
  2119. }
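/*
 * Example (illustrative): the string parsed above is
 * "<bus> <vendor> <product> [driver_data]" in hexadecimal, so writing
 * "0003 1234 abcd" to /sys/bus/hid/drivers/<driver>/new_id binds the driver
 * to the USB (bus 0x0003) device 1234:abcd; the ids here are made up.
 */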
  2120. static DRIVER_ATTR_WO(new_id);
  2121. static struct attribute *hid_drv_attrs[] = {
  2122. &driver_attr_new_id.attr,
  2123. NULL,
  2124. };
  2125. ATTRIBUTE_GROUPS(hid_drv);
  2126. static void hid_free_dynids(struct hid_driver *hdrv)
  2127. {
  2128. struct hid_dynid *dynid, *n;
  2129. spin_lock(&hdrv->dyn_lock);
  2130. list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
  2131. list_del(&dynid->list);
  2132. kfree(dynid);
  2133. }
  2134. spin_unlock(&hdrv->dyn_lock);
  2135. }
  2136. const struct hid_device_id *hid_match_device(struct hid_device *hdev,
  2137. struct hid_driver *hdrv)
  2138. {
  2139. struct hid_dynid *dynid;
  2140. spin_lock(&hdrv->dyn_lock);
  2141. list_for_each_entry(dynid, &hdrv->dyn_list, list) {
  2142. if (hid_match_one_id(hdev, &dynid->id)) {
  2143. spin_unlock(&hdrv->dyn_lock);
  2144. return &dynid->id;
  2145. }
  2146. }
  2147. spin_unlock(&hdrv->dyn_lock);
  2148. return hid_match_id(hdev, hdrv->id_table);
  2149. }
  2150. EXPORT_SYMBOL_GPL(hid_match_device);
  2151. static int hid_bus_match(struct device *dev, struct device_driver *drv)
  2152. {
  2153. struct hid_driver *hdrv = to_hid_driver(drv);
  2154. struct hid_device *hdev = to_hid_device(dev);
  2155. return hid_match_device(hdev, hdrv) != NULL;
  2156. }
  2157. /**
  2158. * hid_compare_device_paths - check if both devices share the same path
  2159. * @hdev_a: hid device
  2160. * @hdev_b: hid device
  2161. * @separator: char to use as separator
  2162. *
  2163. * Check if two devices share the same path up to the last occurrence of
  2164. * the separator char. Both paths must exist (i.e., zero-length paths
  2165. * don't match).
  2166. */
  2167. bool hid_compare_device_paths(struct hid_device *hdev_a,
  2168. struct hid_device *hdev_b, char separator)
  2169. {
  2170. int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
  2171. int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
  2172. if (n1 != n2 || n1 <= 0 || n2 <= 0)
  2173. return false;
  2174. return !strncmp(hdev_a->phys, hdev_b->phys, n1);
  2175. }
  2176. EXPORT_SYMBOL_GPL(hid_compare_device_paths);
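/*
 * Example (illustrative): with separator '/', phys strings such as
 * "usb-0000:00:14.0-2/input0" and "usb-0000:00:14.0-2/input1" compare equal
 * (same path up to the last '/'), while interfaces of devices on different
 * ports do not.
 */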
  2177. static int hid_device_probe(struct device *dev)
  2178. {
  2179. struct hid_driver *hdrv = to_hid_driver(dev->driver);
  2180. struct hid_device *hdev = to_hid_device(dev);
  2181. const struct hid_device_id *id;
  2182. int ret = 0;
  2183. if (down_interruptible(&hdev->driver_input_lock)) {
  2184. ret = -EINTR;
  2185. goto end;
  2186. }
  2187. hdev->io_started = false;
  2188. clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
  2189. if (!hdev->driver) {
  2190. id = hid_match_device(hdev, hdrv);
  2191. if (id == NULL) {
  2192. ret = -ENODEV;
  2193. goto unlock;
  2194. }
  2195. if (hdrv->match) {
  2196. if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
  2197. ret = -ENODEV;
  2198. goto unlock;
  2199. }
  2200. } else {
  2201. /*
  2202. * hid-generic implements .match(), so if
  2203. * hid_ignore_special_drivers is set, we can safely
  2204. * return.
  2205. */
  2206. if (hid_ignore_special_drivers) {
  2207. ret = -ENODEV;
  2208. goto unlock;
  2209. }
  2210. }
/* reset the quirks that have been previously set */
  2212. hdev->quirks = hid_lookup_quirk(hdev);
  2213. hdev->driver = hdrv;
  2214. if (hdrv->probe) {
  2215. ret = hdrv->probe(hdev, id);
  2216. } else { /* default probe */
  2217. ret = hid_open_report(hdev);
  2218. if (!ret)
  2219. ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
  2220. }
  2221. if (ret) {
  2222. hid_close_report(hdev);
  2223. hdev->driver = NULL;
  2224. }
  2225. }
  2226. unlock:
  2227. if (!hdev->io_started)
  2228. up(&hdev->driver_input_lock);
  2229. end:
  2230. return ret;
  2231. }
  2232. static void hid_device_remove(struct device *dev)
  2233. {
  2234. struct hid_device *hdev = to_hid_device(dev);
  2235. struct hid_driver *hdrv;
  2236. down(&hdev->driver_input_lock);
  2237. hdev->io_started = false;
  2238. hdrv = hdev->driver;
  2239. if (hdrv) {
  2240. if (hdrv->remove)
  2241. hdrv->remove(hdev);
  2242. else /* default remove */
  2243. hid_hw_stop(hdev);
  2244. hid_close_report(hdev);
  2245. hdev->driver = NULL;
  2246. }
  2247. if (!hdev->io_started)
  2248. up(&hdev->driver_input_lock);
  2249. }
  2250. static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
  2251. char *buf)
  2252. {
  2253. struct hid_device *hdev = container_of(dev, struct hid_device, dev);
  2254. return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
  2255. hdev->bus, hdev->group, hdev->vendor, hdev->product);
  2256. }
  2257. static DEVICE_ATTR_RO(modalias);
  2258. static struct attribute *hid_dev_attrs[] = {
  2259. &dev_attr_modalias.attr,
  2260. NULL,
  2261. };
  2262. static struct bin_attribute *hid_dev_bin_attrs[] = {
  2263. &dev_bin_attr_report_desc,
  2264. NULL
  2265. };
  2266. static const struct attribute_group hid_dev_group = {
  2267. .attrs = hid_dev_attrs,
  2268. .bin_attrs = hid_dev_bin_attrs,
  2269. };
  2270. __ATTRIBUTE_GROUPS(hid_dev);
  2271. static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
  2272. {
  2273. struct hid_device *hdev = to_hid_device(dev);
  2274. if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
  2275. hdev->bus, hdev->vendor, hdev->product))
  2276. return -ENOMEM;
  2277. if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
  2278. return -ENOMEM;
  2279. if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
  2280. return -ENOMEM;
  2281. if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
  2282. return -ENOMEM;
  2283. if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
  2284. hdev->bus, hdev->group, hdev->vendor, hdev->product))
  2285. return -ENOMEM;
  2286. return 0;
  2287. }
  2288. struct bus_type hid_bus_type = {
  2289. .name = "hid",
  2290. .dev_groups = hid_dev_groups,
  2291. .drv_groups = hid_drv_groups,
  2292. .match = hid_bus_match,
  2293. .probe = hid_device_probe,
  2294. .remove = hid_device_remove,
  2295. .uevent = hid_uevent,
  2296. };
  2297. EXPORT_SYMBOL(hid_bus_type);
  2298. int hid_add_device(struct hid_device *hdev)
  2299. {
  2300. static atomic_t id = ATOMIC_INIT(0);
  2301. int ret;
  2302. if (WARN_ON(hdev->status & HID_STAT_ADDED))
  2303. return -EBUSY;
  2304. hdev->quirks = hid_lookup_quirk(hdev);
/* we need to kill them here, otherwise they will stay allocated to
 * wait for an upcoming driver */
  2307. if (hid_ignore(hdev))
  2308. return -ENODEV;
  2309. /*
  2310. * Check for the mandatory transport channel.
  2311. */
  2312. if (!hdev->ll_driver->raw_request) {
  2313. hid_err(hdev, "transport driver missing .raw_request()\n");
  2314. return -EINVAL;
  2315. }
  2316. /*
  2317. * Read the device report descriptor once and use as template
  2318. * for the driver-specific modifications.
  2319. */
  2320. ret = hdev->ll_driver->parse(hdev);
  2321. if (ret)
  2322. return ret;
  2323. if (!hdev->dev_rdesc)
  2324. return -ENODEV;
  2325. /*
  2326. * Scan generic devices for group information
  2327. */
  2328. if (hid_ignore_special_drivers) {
  2329. hdev->group = HID_GROUP_GENERIC;
  2330. } else if (!hdev->group &&
  2331. !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
  2332. ret = hid_scan_report(hdev);
  2333. if (ret)
  2334. hid_warn(hdev, "bad device descriptor (%d)\n", ret);
  2335. }
  2336. hdev->id = atomic_inc_return(&id);
  2337. /* XXX hack, any other cleaner solution after the driver core
  2338. * is converted to allow more than 20 bytes as the device name? */
  2339. dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
  2340. hdev->vendor, hdev->product, hdev->id);
  2341. hid_debug_register(hdev, dev_name(&hdev->dev));
  2342. ret = device_add(&hdev->dev);
  2343. if (!ret)
  2344. hdev->status |= HID_STAT_ADDED;
  2345. else
  2346. hid_debug_unregister(hdev);
  2347. return ret;
  2348. }
  2349. EXPORT_SYMBOL_GPL(hid_add_device);
  2350. /**
  2351. * hid_allocate_device - allocate new hid device descriptor
  2352. *
  2353. * Allocate and initialize hid device, so that hid_destroy_device might be
  2354. * used to free it.
  2355. *
  2356. * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
  2357. * error value.
  2358. */
  2359. struct hid_device *hid_allocate_device(void)
  2360. {
  2361. struct hid_device *hdev;
  2362. int ret = -ENOMEM;
  2363. hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
  2364. if (hdev == NULL)
  2365. return ERR_PTR(ret);
  2366. device_initialize(&hdev->dev);
  2367. hdev->dev.release = hid_device_release;
  2368. hdev->dev.bus = &hid_bus_type;
  2369. device_enable_async_suspend(&hdev->dev);
  2370. hid_close_report(hdev);
  2371. init_waitqueue_head(&hdev->debug_wait);
  2372. INIT_LIST_HEAD(&hdev->debug_list);
  2373. spin_lock_init(&hdev->debug_list_lock);
  2374. sema_init(&hdev->driver_input_lock, 1);
  2375. mutex_init(&hdev->ll_open_lock);
  2376. return hdev;
  2377. }
  2378. EXPORT_SYMBOL_GPL(hid_allocate_device);
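/*
 * Example (illustrative, hypothetical transport driver): the usual
 * allocate/register sequence.  "example_ll_driver" and the id values are
 * placeholders.
 */
#if 0	/* usage sketch, not compiled */
static int example_transport_probe(void)
{
	struct hid_device *hdev;
	int ret;

	hdev = hid_allocate_device();
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	hdev->ll_driver = &example_ll_driver;
	hdev->bus = BUS_VIRTUAL;
	hdev->vendor = 0x1234;
	hdev->product = 0x5678;
	strscpy(hdev->name, "Example HID Device", sizeof(hdev->name));

	ret = hid_add_device(hdev);
	if (ret)
		hid_destroy_device(hdev);
	return ret;
}
#endif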
  2379. static void hid_remove_device(struct hid_device *hdev)
  2380. {
  2381. if (hdev->status & HID_STAT_ADDED) {
  2382. device_del(&hdev->dev);
  2383. hid_debug_unregister(hdev);
  2384. hdev->status &= ~HID_STAT_ADDED;
  2385. }
  2386. kfree(hdev->dev_rdesc);
  2387. hdev->dev_rdesc = NULL;
  2388. hdev->dev_rsize = 0;
  2389. }
  2390. /**
  2391. * hid_destroy_device - free previously allocated device
  2392. *
  2393. * @hdev: hid device
  2394. *
 * If you allocate hid_device through hid_allocate_device, you should only
 * ever free it with this function.
  2397. */
  2398. void hid_destroy_device(struct hid_device *hdev)
  2399. {
  2400. hid_remove_device(hdev);
  2401. put_device(&hdev->dev);
  2402. }
  2403. EXPORT_SYMBOL_GPL(hid_destroy_device);
  2404. static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
  2405. {
  2406. struct hid_driver *hdrv = data;
  2407. struct hid_device *hdev = to_hid_device(dev);
  2408. if (hdev->driver == hdrv &&
  2409. !hdrv->match(hdev, hid_ignore_special_drivers) &&
  2410. !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
  2411. return device_reprobe(dev);
  2412. return 0;
  2413. }
  2414. static int __hid_bus_driver_added(struct device_driver *drv, void *data)
  2415. {
  2416. struct hid_driver *hdrv = to_hid_driver(drv);
  2417. if (hdrv->match) {
  2418. bus_for_each_dev(&hid_bus_type, NULL, hdrv,
  2419. __hid_bus_reprobe_drivers);
  2420. }
  2421. return 0;
  2422. }
  2423. static int __bus_removed_driver(struct device_driver *drv, void *data)
  2424. {
  2425. return bus_rescan_devices(&hid_bus_type);
  2426. }
  2427. int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
  2428. const char *mod_name)
  2429. {
  2430. int ret;
  2431. hdrv->driver.name = hdrv->name;
  2432. hdrv->driver.bus = &hid_bus_type;
  2433. hdrv->driver.owner = owner;
  2434. hdrv->driver.mod_name = mod_name;
  2435. INIT_LIST_HEAD(&hdrv->dyn_list);
  2436. spin_lock_init(&hdrv->dyn_lock);
  2437. ret = driver_register(&hdrv->driver);
  2438. if (ret == 0)
  2439. bus_for_each_drv(&hid_bus_type, NULL, NULL,
  2440. __hid_bus_driver_added);
  2441. return ret;
  2442. }
  2443. EXPORT_SYMBOL_GPL(__hid_register_driver);
  2444. void hid_unregister_driver(struct hid_driver *hdrv)
  2445. {
  2446. driver_unregister(&hdrv->driver);
  2447. hid_free_dynids(hdrv);
  2448. bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
  2449. }
  2450. EXPORT_SYMBOL_GPL(hid_unregister_driver);
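/*
 * Example (illustrative, hypothetical driver module): registration is
 * normally done through the module_hid_driver() helper, which expands to
 * hid_register_driver()/hid_unregister_driver() calls in module init/exit.
 * "example_id_table" and "example_probe" refer to the sketches above.
 */
#if 0	/* usage sketch, not compiled */
static struct hid_driver example_hid_driver = {
	.name = "example-hid",
	.id_table = example_id_table,
	.probe = example_probe,
};
module_hid_driver(example_hid_driver);
#endif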
  2451. int hid_check_keys_pressed(struct hid_device *hid)
  2452. {
  2453. struct hid_input *hidinput;
  2454. int i;
  2455. if (!(hid->claimed & HID_CLAIMED_INPUT))
  2456. return 0;
  2457. list_for_each_entry(hidinput, &hid->inputs, list) {
  2458. for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
  2459. if (hidinput->input->key[i])
  2460. return 1;
  2461. }
  2462. return 0;
  2463. }
  2464. EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
  2465. static int __init hid_init(void)
  2466. {
  2467. int ret;
  2468. if (hid_debug)
  2469. pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
  2470. "debugfs is now used for inspecting the device (report descriptor, reports)\n");
  2471. ret = bus_register(&hid_bus_type);
  2472. if (ret) {
  2473. pr_err("can't register hid bus\n");
  2474. goto err;
  2475. }
  2476. ret = hidraw_init();
  2477. if (ret)
  2478. goto err_bus;
  2479. hid_debug_init();
  2480. return 0;
  2481. err_bus:
  2482. bus_unregister(&hid_bus_type);
  2483. err:
  2484. return ret;
  2485. }
  2486. static void __exit hid_exit(void)
  2487. {
  2488. hid_debug_exit();
  2489. hidraw_exit();
  2490. bus_unregister(&hid_bus_type);
  2491. hid_quirks_exit(HID_BUS_ANY);
  2492. }
  2493. module_init(hid_init);
  2494. module_exit(hid_exit);
  2495. MODULE_AUTHOR("Andreas Gal");
  2496. MODULE_AUTHOR("Vojtech Pavlik");
  2497. MODULE_AUTHOR("Jiri Kosina");
  2498. MODULE_LICENSE("GPL");