rmi_driver.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
#include "rmi_driver.h"

#define HAS_NONSTANDARD_PDT_MASK 0x40
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x100
#define RMI4_PAGE_MASK 0xFF00

#define RMI_DEVICE_RESET_CMD 0x01
#define DEFAULT_RESET_DELAY_MS 100
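
/*
 * Tear down every registered function and release the shared IRQ bitmaps.
 * The list is walked in reverse so that F01 is removed last.
 */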
void rmi_free_function_list(struct rmi_device *rmi_dev)
{
        struct rmi_function *fn, *tmp;
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

        rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

        /* Doing it in the reverse order so F01 will be removed last */
        list_for_each_entry_safe_reverse(fn, tmp,
                                         &data->function_list, node) {
                list_del(&fn->node);
                rmi_unregister_function(fn);
        }

        devm_kfree(&rmi_dev->dev, data->irq_memory);
        data->irq_memory = NULL;
        data->irq_status = NULL;
        data->fn_irq_bits = NULL;
        data->current_irq_mask = NULL;
        data->new_irq_mask = NULL;

        data->f01_container = NULL;
        data->f34_container = NULL;
}

static int reset_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->reset) {
                retval = fh->reset(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Reset failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int configure_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->config) {
                retval = fh->config(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Config failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = reset_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = configure_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}
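
/*
 * Read the interrupt status registers from F01 (unless the transport has
 * already supplied attention data), mask the result against the enabled
 * function IRQ bits, and dispatch each pending source as a nested IRQ.
 */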
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;
        int i;
        int error;

        if (!data)
                return 0;

        if (!data->attn_data.data) {
                error = rmi_read_block(rmi_dev,
                                data->f01_container->fd.data_base_addr + 1,
                                data->irq_status, data->num_of_irq_regs);
                if (error < 0) {
                        dev_err(dev, "Failed to read irqs, code=%d\n", error);
                        return error;
                }
        }

        mutex_lock(&data->irq_mutex);
        bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
                   data->irq_count);
        /*
         * At this point, irq_status has all bits that are set in the
         * interrupt status register and are enabled.
         */
        mutex_unlock(&data->irq_mutex);

        for_each_set_bit(i, data->irq_status, data->irq_count)
                handle_nested_irq(irq_find_mapping(data->irqdomain, i));

        if (data->input)
                input_sync(data->input);

        return 0;
}
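
/*
 * Transport drivers that receive attention data out of band queue it here;
 * rmi_irq_fn() consumes the FIFO entry instead of reading the status
 * registers itself.
 */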
void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
                       void *data, size_t size)
{
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct rmi4_attn_data attn_data;
        void *fifo_data;

        if (!drvdata->enabled)
                return;

        fifo_data = kmemdup(data, size, GFP_ATOMIC);
        if (!fifo_data)
                return;

        attn_data.irq_status = irq_status;
        attn_data.size = size;
        attn_data.data = fifo_data;

        kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);
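
/*
 * Threaded interrupt handler: pull one queued attention record (if any),
 * process the pending interrupt sources, then recurse until the attention
 * FIFO is drained.
 */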
static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
{
        struct rmi_device *rmi_dev = dev_id;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct rmi4_attn_data attn_data = {0};
        int ret, count;

        count = kfifo_get(&drvdata->attn_fifo, &attn_data);
        if (count) {
                *(drvdata->irq_status) = attn_data.irq_status;
                drvdata->attn_data = attn_data;
        }

        ret = rmi_process_interrupt_requests(rmi_dev);
        if (ret)
                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
                        "Failed to process interrupt request: %d\n", ret);

        if (count) {
                kfree(attn_data.data);
                drvdata->attn_data.data = NULL;
        }

        if (!kfifo_is_empty(&drvdata->attn_fifo))
                return rmi_irq_fn(irq, dev_id);

        return IRQ_HANDLED;
}
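
/*
 * Request the transport's interrupt line as a threaded, one-shot IRQ,
 * defaulting to level-low triggering when no trigger type is configured.
 */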
static int rmi_irq_init(struct rmi_device *rmi_dev)
{
        struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int irq_flags = irq_get_trigger_type(pdata->irq);
        int ret;

        if (!irq_flags)
                irq_flags = IRQF_TRIGGER_LOW;

        ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
                                        rmi_irq_fn, irq_flags | IRQF_ONESHOT,
                                        dev_driver_string(rmi_dev->xport->dev),
                                        rmi_dev);
        if (ret < 0) {
                dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
                        pdata->irq);
                return ret;
        }

        data->enabled = true;

        return 0;
}

struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;

        list_for_each_entry(entry, &data->function_list, node) {
                if (entry->fd.function_number == number)
                        return entry;
        }

        return NULL;
}

static int suspend_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->suspend) {
                retval = fh->suspend(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Suspend failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = suspend_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

static int resume_one_function(struct rmi_function *fn)
{
        struct rmi_function_handler *fh;
        int retval = 0;

        if (!fn || !fn->dev.driver)
                return 0;

        fh = to_rmi_function_handler(fn->dev.driver);
        if (fh->resume) {
                retval = fh->resume(fn);
                if (retval < 0)
                        dev_err(&fn->dev, "Resume failed with code %d.\n",
                                retval);
        }

        return retval;
}

static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi_function *entry;
        int retval;

        list_for_each_entry(entry, &data->function_list, node) {
                retval = resume_one_function(entry);
                if (retval < 0)
                        return retval;
        }

        return 0;
}
int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
        int retval = 0;

        retval = rmi_driver_process_config_requests(rmi_dev);
        if (retval < 0)
                return retval;

        return rmi_process_interrupt_requests(rmi_dev);
}

/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 *
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
                                       struct input_dev *input)
{
        input->name = SYNAPTICS_INPUT_DEVICE_NAME;
        input->id.vendor = SYNAPTICS_VENDOR_ID;
        input->id.bustype = BUS_RMI;
        return 0;
}

static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
                                      struct input_dev *input)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        const char *device_name = rmi_f01_get_product_ID(data->f01_container);
        char *name;

        name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
                              "Synaptics %s", device_name);
        if (!name)
                return;

        input->name = name;
}
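
/*
 * Enable the interrupt sources in @mask by OR-ing them into the F01
 * interrupt enable register; the clear helper below removes them again.
 * Both update the cached masks under data->irq_mutex.
 */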
static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
                                   unsigned long *mask)
{
        int error = 0;
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;

        mutex_lock(&data->irq_mutex);
        bitmap_or(data->new_irq_mask,
                  data->current_irq_mask, mask, data->irq_count);

        error = rmi_write_block(rmi_dev,
                        data->f01_container->fd.control_base_addr + 1,
                        data->new_irq_mask, data->num_of_irq_regs);
        if (error < 0) {
                dev_err(dev, "%s: Failed to change enabled interrupts!",
                        __func__);
                goto error_unlock;
        }
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);

        bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);

error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
}

static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
                                     unsigned long *mask)
{
        int error = 0;
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;

        mutex_lock(&data->irq_mutex);
        bitmap_andnot(data->fn_irq_bits,
                      data->fn_irq_bits, mask, data->irq_count);
        bitmap_andnot(data->new_irq_mask,
                      data->current_irq_mask, mask, data->irq_count);

        error = rmi_write_block(rmi_dev,
                        data->f01_container->fd.control_base_addr + 1,
                        data->new_irq_mask, data->num_of_irq_regs);
        if (error < 0) {
                dev_err(dev, "%s: Failed to change enabled interrupts!",
                        __func__);
                goto error_unlock;
        }
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);

error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
}
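
/*
 * Called when the device reports a reset: re-read the interrupt enable
 * mask, then let every function re-initialize and re-configure itself.
 */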
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int error;

        /*
         * Can get called before the driver is fully ready to deal with
         * this situation.
         */
        if (!data || !data->f01_container) {
                dev_warn(&rmi_dev->dev,
                         "Not ready to handle reset yet!\n");
                return 0;
        }

        error = rmi_read_block(rmi_dev,
                               data->f01_container->fd.control_base_addr + 1,
                               data->current_irq_mask, data->num_of_irq_regs);
        if (error < 0) {
                dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
                        __func__);
                return error;
        }

        error = rmi_driver_process_reset_requests(rmi_dev);
        if (error < 0)
                return error;

        error = rmi_driver_process_config_requests(rmi_dev);
        if (error < 0)
                return error;

        return 0;
}

static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
                              struct pdt_entry *entry, u16 pdt_address)
{
        u8 buf[RMI_PDT_ENTRY_SIZE];
        int error;

        error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
        if (error) {
                dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
                        pdt_address, error);
                return error;
        }

        entry->page_start = pdt_address & RMI4_PAGE_MASK;
        entry->query_base_addr = buf[0];
        entry->command_base_addr = buf[1];
        entry->control_base_addr = buf[2];
        entry->data_base_addr = buf[3];
        entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
        entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
        entry->function_number = buf[5];

        return 0;
}

static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
                                      struct rmi_function_descriptor *fd)
{
        fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
        fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
        fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
        fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
        fd->function_number = pdt->function_number;
        fd->interrupt_source_count = pdt->interrupt_source_count;
        fd->function_version = pdt->function_version;
}
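
/*
 * PDT scanning walks each page's descriptor table from the top down and
 * hands every valid entry to a callback. The callback returns
 * RMI_SCAN_CONTINUE to keep going, RMI_SCAN_DONE to stop cleanly, or a
 * negative errno to abort.
 */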
#define RMI_SCAN_CONTINUE 0
#define RMI_SCAN_DONE 1

static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
                             int page,
                             int *empty_pages,
                             void *ctx,
                             int (*callback)(struct rmi_device *rmi_dev,
                                             void *ctx,
                                             const struct pdt_entry *entry))
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct pdt_entry pdt_entry;
        u16 page_start = RMI4_PAGE_SIZE * page;
        u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
        u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
        u16 addr;
        int error;
        int retval;

        for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
                error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
                if (error)
                        return error;

                if (RMI4_END_OF_PDT(pdt_entry.function_number))
                        break;

                retval = callback(rmi_dev, ctx, &pdt_entry);
                if (retval != RMI_SCAN_CONTINUE)
                        return retval;
        }

        /*
         * Count number of empty PDT pages. If a gap of two pages
         * or more is found, stop scanning.
         */
        if (addr == pdt_start)
                ++*empty_pages;
        else
                *empty_pages = 0;

        return (data->bootloader_mode || *empty_pages >= 2) ?
                                        RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
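
/*
 * Scan every page of the PDT until the callback ends the scan or two
 * consecutive empty pages are found.
 */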
int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
                 int (*callback)(struct rmi_device *rmi_dev,
                                 void *ctx, const struct pdt_entry *entry))
{
        int page;
        int empty_pages = 0;
        int retval = RMI_SCAN_DONE;

        for (page = 0; page <= RMI4_MAX_PAGE; page++) {
                retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
                                           ctx, callback);
                if (retval != RMI_SCAN_CONTINUE)
                        break;
        }

        return retval < 0 ? retval : 0;
}
int rmi_read_register_desc(struct rmi_device *d, u16 addr,
                           struct rmi_register_descriptor *rdesc)
{
        int ret;
        u8 size_presence_reg;
        u8 buf[35];
        int presense_offset = 1;
        u8 *struct_buf;
        int reg;
        int offset = 0;
        int map_offset = 0;
        int i;
        int b;

        /*
         * The first register of the register descriptor is the size of
         * the register descriptor's presence register.
         */
        ret = rmi_read(d, addr, &size_presence_reg);
        if (ret)
                return ret;
        ++addr;

        if (size_presence_reg < 0 || size_presence_reg > 35)
                return -EIO;

        memset(buf, 0, sizeof(buf));

        /*
         * The presence register contains the size of the register structure
         * and a bitmap which identifies which packet registers are present
         * for this particular register type (i.e. query, control, or data).
         */
        ret = rmi_read_block(d, addr, buf, size_presence_reg);
        if (ret)
                return ret;
        ++addr;

        if (buf[0] == 0) {
                presense_offset = 3;
                rdesc->struct_size = buf[1] | (buf[2] << 8);
        } else {
                rdesc->struct_size = buf[0];
        }

        for (i = presense_offset; i < size_presence_reg; i++) {
                for (b = 0; b < 8; b++) {
                        if (buf[i] & (0x1 << b))
                                bitmap_set(rdesc->presense_map, map_offset, 1);
                        ++map_offset;
                }
        }

        rdesc->num_registers = bitmap_weight(rdesc->presense_map,
                                             RMI_REG_DESC_PRESENSE_BITS);

        rdesc->registers = devm_kcalloc(&d->dev,
                                        rdesc->num_registers,
                                        sizeof(struct rmi_register_desc_item),
                                        GFP_KERNEL);
        if (!rdesc->registers)
                return -ENOMEM;

        /*
         * Allocate a temporary buffer to hold the register structure.
         * Not using devm_kzalloc here since the buffer does not need to be
         * retained after this function exits.
         */
        struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
        if (!struct_buf)
                return -ENOMEM;

        /*
         * The register structure contains information about every packet
         * register of this type. This includes the size of the packet
         * register and a bitmap of all subpackets contained in the packet
         * register.
         */
        ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
        if (ret)
                goto free_struct_buff;

        reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
        for (i = 0; i < rdesc->num_registers; i++) {
                struct rmi_register_desc_item *item = &rdesc->registers[i];
                int reg_size = struct_buf[offset];

                ++offset;
                if (reg_size == 0) {
                        reg_size = struct_buf[offset] |
                                        (struct_buf[offset + 1] << 8);
                        offset += 2;
                }

                if (reg_size == 0) {
                        reg_size = struct_buf[offset] |
                                        (struct_buf[offset + 1] << 8) |
                                        (struct_buf[offset + 2] << 16) |
                                        (struct_buf[offset + 3] << 24);
                        offset += 4;
                }

                item->reg = reg;
                item->reg_size = reg_size;

                map_offset = 0;

                do {
                        for (b = 0; b < 7; b++) {
                                if (struct_buf[offset] & (0x1 << b))
                                        bitmap_set(item->subpacket_map,
                                                   map_offset, 1);
                                ++map_offset;
                        }
                } while (struct_buf[offset++] & 0x80);

                item->num_subpackets = bitmap_weight(item->subpacket_map,
                                                RMI_REG_DESC_SUBPACKET_BITS);

                rmi_dbg(RMI_DEBUG_CORE, &d->dev,
                        "%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
                        item->reg, item->reg_size, item->num_subpackets);

                reg = find_next_bit(rdesc->presense_map,
                                    RMI_REG_DESC_PRESENSE_BITS, reg + 1);
        }

free_struct_buff:
        kfree(struct_buf);
        return ret;
}
const struct rmi_register_desc_item *rmi_get_register_desc_item(
                        struct rmi_register_descriptor *rdesc, u16 reg)
{
        const struct rmi_register_desc_item *item;
        int i;

        for (i = 0; i < rdesc->num_registers; i++) {
                item = &rdesc->registers[i];
                if (item->reg == reg)
                        return item;
        }

        return NULL;
}

size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
        const struct rmi_register_desc_item *item;
        int i;
        size_t size = 0;

        for (i = 0; i < rdesc->num_registers; i++) {
                item = &rdesc->registers[i];
                size += item->reg_size;
        }
        return size;
}

/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
                struct rmi_register_descriptor *rdesc, u16 reg)
{
        const struct rmi_register_desc_item *item;
        int offset = 0;
        int i;

        for (i = 0; i < rdesc->num_registers; i++) {
                item = &rdesc->registers[i];
                if (item->reg == reg)
                        return offset;
                ++offset;
        }
        return -1;
}

bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
                                     u8 subpacket)
{
        return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
                             subpacket) == subpacket;
}
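
/*
 * Check the F34 (v2+) or F01 status register for the bootloader/flash
 * programming bit and note whether the device is stuck in bootloader mode.
 */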
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
                                     const struct pdt_entry *pdt)
{
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int ret;
        u8 status;

        if (pdt->function_number == 0x34 && pdt->function_version > 1) {
                ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
                if (ret) {
                        dev_err(&rmi_dev->dev,
                                "Failed to read F34 status: %d.\n", ret);
                        return ret;
                }

                if (status & BIT(7))
                        data->bootloader_mode = true;
        } else if (pdt->function_number == 0x01) {
                ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
                if (ret) {
                        dev_err(&rmi_dev->dev,
                                "Failed to read F01 status: %d.\n", ret);
                        return ret;
                }

                if (status & BIT(6))
                        data->bootloader_mode = true;
        }

        return 0;
}

static int rmi_count_irqs(struct rmi_device *rmi_dev,
                          void *ctx, const struct pdt_entry *pdt)
{
        int *irq_count = ctx;
        int ret;

        *irq_count += pdt->interrupt_source_count;

        ret = rmi_check_bootloader_mode(rmi_dev, pdt);
        if (ret < 0)
                return ret;

        return RMI_SCAN_CONTINUE;
}
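
/*
 * PDT scan callback used at probe time: once F01 is found, reset the device
 * (through the transport's reset hook if it provides one, otherwise by
 * writing the F01 reset command) and wait out the configured reset delay.
 */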
int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
                      const struct pdt_entry *pdt)
{
        int error;

        if (pdt->function_number == 0x01) {
                u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
                u8 cmd_buf = RMI_DEVICE_RESET_CMD;
                const struct rmi_device_platform_data *pdata =
                                rmi_get_platform_data(rmi_dev);

                if (rmi_dev->xport->ops->reset) {
                        error = rmi_dev->xport->ops->reset(rmi_dev->xport,
                                                           cmd_addr);
                        if (error)
                                return error;

                        return RMI_SCAN_DONE;
                }

                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
                error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
                if (error) {
                        dev_err(&rmi_dev->dev,
                                "Initial reset failed. Code = %d.\n", error);
                        return error;
                }

                mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

                return RMI_SCAN_DONE;
        }

        /* F01 should always be on page 0. If we don't find it there, fail. */
        return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}
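
/*
 * PDT scan callback that allocates an rmi_function for each entry, assigns
 * its interrupt bit positions, registers it on the RMI bus and links it
 * into the driver's function list.
 */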
static int rmi_create_function(struct rmi_device *rmi_dev,
                               void *ctx, const struct pdt_entry *pdt)
{
        struct device *dev = &rmi_dev->dev;
        struct rmi_driver_data *data = dev_get_drvdata(dev);
        int *current_irq_count = ctx;
        struct rmi_function *fn;
        int i;
        int error;

        rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
                pdt->function_number);

        fn = kzalloc(sizeof(struct rmi_function) +
                        BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
                GFP_KERNEL);
        if (!fn) {
                dev_err(dev, "Failed to allocate memory for F%02X\n",
                        pdt->function_number);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&fn->node);
        rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

        fn->rmi_dev = rmi_dev;

        fn->num_of_irqs = pdt->interrupt_source_count;
        fn->irq_pos = *current_irq_count;
        *current_irq_count += fn->num_of_irqs;

        for (i = 0; i < fn->num_of_irqs; i++)
                set_bit(fn->irq_pos + i, fn->irq_mask);

        error = rmi_register_function(fn);
        if (error)
                return error;

        if (pdt->function_number == 0x01)
                data->f01_container = fn;
        else if (pdt->function_number == 0x34)
                data->f34_container = fn;

        list_add_tail(&fn->node, &data->function_list);

        return RMI_SCAN_CONTINUE;
}

void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
        struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        int irq = pdata->irq;
        int irq_flags;
        int retval;

        mutex_lock(&data->enabled_mutex);

        if (data->enabled)
                goto out;

        enable_irq(irq);
        data->enabled = true;
        if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = disable_irq_wake(irq);
                if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to disable irq for wake: %d\n",
                                 retval);
        }

        /*
         * Call rmi_process_interrupt_requests() after enabling irq,
         * otherwise we may lose interrupt on edge-triggered systems.
         */
        irq_flags = irq_get_trigger_type(pdata->irq);
        if (irq_flags & IRQ_TYPE_EDGE_BOTH)
                rmi_process_interrupt_requests(rmi_dev);

out:
        mutex_unlock(&data->enabled_mutex);
}

void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
        struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct rmi4_attn_data attn_data = {0};
        int irq = pdata->irq;
        int retval, count;

        mutex_lock(&data->enabled_mutex);

        if (!data->enabled)
                goto out;

        data->enabled = false;
        disable_irq(irq);
        if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = enable_irq_wake(irq);
                if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to enable irq for wake: %d\n",
                                 retval);
        }

        /* make sure the fifo is clean */
        while (!kfifo_is_empty(&data->attn_fifo)) {
                count = kfifo_get(&data->attn_fifo, &attn_data);
                if (count)
                        kfree(attn_data.data);
        }

out:
        mutex_unlock(&data->enabled_mutex);
}
int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
        int retval;

        retval = rmi_suspend_functions(rmi_dev);
        if (retval)
                dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
                         retval);

        rmi_disable_irq(rmi_dev, enable_wake);
        return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);

int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
        int retval;

        rmi_enable_irq(rmi_dev, clear_wake);

        retval = rmi_resume_functions(rmi_dev);
        if (retval)
                dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
                         retval);

        return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_resume);
static int rmi_driver_remove(struct device *dev)
{
        struct rmi_device *rmi_dev = to_rmi_device(dev);
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

        rmi_disable_irq(rmi_dev, false);

        irq_domain_remove(data->irqdomain);
        data->irqdomain = NULL;

        rmi_f34_remove_sysfs(rmi_dev);
        rmi_free_function_list(rmi_dev);

        return 0;
}

#ifdef CONFIG_OF
static int rmi_driver_of_probe(struct device *dev,
                               struct rmi_device_platform_data *pdata)
{
        int retval;

        retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
                                          "syna,reset-delay-ms", 1);
        if (retval)
                return retval;

        return 0;
}
#else
static inline int rmi_driver_of_probe(struct device *dev,
                                      struct rmi_device_platform_data *pdata)
{
        return -ENODEV;
}
#endif
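
/*
 * Count the interrupt sources described in the PDT, create the IRQ domain,
 * and carve a single devm allocation into the four bitmaps used for IRQ
 * bookkeeping (status, enabled function bits, current mask, new mask).
 */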
int rmi_probe_interrupts(struct rmi_driver_data *data)
{
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
        struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
        int irq_count = 0;
        size_t size;
        int retval;

        /*
         * We need to count the IRQs and allocate their storage before scanning
         * the PDT and creating the function entries, because adding a new
         * function can trigger events that result in the IRQ related storage
         * being accessed.
         */
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
        data->bootloader_mode = false;

        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
        if (retval < 0) {
                dev_err(dev, "IRQ counting failed with code %d.\n", retval);
                return retval;
        }

        if (data->bootloader_mode)
                dev_warn(dev, "Device in bootloader mode.\n");

        /* Allocate and register a linear revmap irq_domain */
        data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
                                                   &irq_domain_simple_ops,
                                                   data);
        if (!data->irqdomain) {
                dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
                return -ENOMEM;
        }

        data->irq_count = irq_count;
        data->num_of_irq_regs = (data->irq_count + 7) / 8;

        size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
        data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
        if (!data->irq_memory) {
                dev_err(dev, "Failed to allocate memory for irq masks.\n");
                return -ENOMEM;
        }

        data->irq_status = data->irq_memory + size * 0;
        data->fn_irq_bits = data->irq_memory + size * 1;
        data->current_irq_mask = data->irq_memory + size * 2;
        data->new_irq_mask = data->irq_memory + size * 3;

        return retval;
}

int rmi_init_functions(struct rmi_driver_data *data)
{
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
        int irq_count = 0;
        int retval;

        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
        if (retval < 0) {
                dev_err(dev, "Function creation failed with code %d.\n",
                        retval);
                goto err_destroy_functions;
        }

        if (!data->f01_container) {
                dev_err(dev, "Missing F01 container!\n");
                retval = -EINVAL;
                goto err_destroy_functions;
        }

        retval = rmi_read_block(rmi_dev,
                                data->f01_container->fd.control_base_addr + 1,
                                data->current_irq_mask, data->num_of_irq_regs);
        if (retval < 0) {
                dev_err(dev, "%s: Failed to read current IRQ mask.\n",
                        __func__);
                goto err_destroy_functions;
        }

        return 0;

err_destroy_functions:
        rmi_free_function_list(rmi_dev);
        return retval;
}
static int rmi_driver_probe(struct device *dev)
{
        struct rmi_driver *rmi_driver;
        struct rmi_driver_data *data;
        struct rmi_device_platform_data *pdata;
        struct rmi_device *rmi_dev;
        int retval;

        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
                __func__);

        if (!rmi_is_physical_device(dev)) {
                rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
                return -ENODEV;
        }

        rmi_dev = to_rmi_device(dev);
        rmi_driver = to_rmi_driver(dev->driver);
        rmi_dev->driver = rmi_driver;

        pdata = rmi_get_platform_data(rmi_dev);

        if (rmi_dev->xport->dev->of_node) {
                retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
                if (retval)
                        return retval;
        }

        data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        INIT_LIST_HEAD(&data->function_list);
        data->rmi_dev = rmi_dev;
        dev_set_drvdata(&rmi_dev->dev, data);

        /*
         * Right before a warm boot, the sensor might be in some unusual state,
         * such as F54 diagnostics, or F34 bootloader mode after a firmware
         * or configuration update. In order to clear the sensor to a known
         * state and/or apply any updates, we issue an initial reset to clear
         * any previous settings and force it into normal operation.
         *
         * We have to do this before actually building the PDT because
         * the reflash updates (if any) might cause various registers to move
         * around.
         *
         * For a number of reasons, this initial reset may fail to return
         * within the specified time, but we'll still be able to bring up the
         * driver normally after that failure. This occurs most commonly in
         * a cold boot situation (where the firmware takes longer to come up
         * than from a warm boot) and the reset_delay_ms in the platform data
         * has been set too short to accommodate that. Since the sensor will
         * eventually come up and be usable, we don't want to just fail here
         * and leave the customer's device unusable. So we warn them, and
         * continue processing.
         */
        retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
        if (retval < 0)
                dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

        retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
        if (retval < 0) {
                /*
                 * we'll print out a warning and continue since
                 * failure to get the PDT properties is not a cause to fail
                 */
                dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
                         PDT_PROPERTIES_LOCATION, retval);
        }

        mutex_init(&data->irq_mutex);
        mutex_init(&data->enabled_mutex);

        retval = rmi_probe_interrupts(data);
        if (retval)
                goto err;

        if (rmi_dev->xport->input) {
                /*
                 * The transport driver already has an input device.
                 * In some cases it is preferable to reuse the transport
                 * device's input device instead of creating a new one here.
                 * One example is HID touchpads whose "pass-through" button
                 * events are not reported through RMI registers.
                 */
                data->input = rmi_dev->xport->input;
        } else {
                data->input = devm_input_allocate_device(dev);
                if (!data->input) {
                        dev_err(dev, "%s: Failed to allocate input device.\n",
                                __func__);
                        retval = -ENOMEM;
                        goto err;
                }
                rmi_driver_set_input_params(rmi_dev, data->input);
                data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
                                                   "%s/input0", dev_name(dev));
        }

        retval = rmi_init_functions(data);
        if (retval)
                goto err;

        retval = rmi_f34_create_sysfs(rmi_dev);
        if (retval)
                goto err;

        if (data->input) {
                rmi_driver_set_input_name(rmi_dev, data->input);
                if (!rmi_dev->xport->input) {
                        retval = input_register_device(data->input);
                        if (retval) {
                                dev_err(dev, "%s: Failed to register input device.\n",
                                        __func__);
                                goto err_destroy_functions;
                        }
                }
        }

        retval = rmi_irq_init(rmi_dev);
        if (retval < 0)
                goto err_destroy_functions;

        if (data->f01_container->dev.driver) {
                /* Driver already bound, so enable ATTN now. */
                retval = rmi_enable_sensor(rmi_dev);
                if (retval)
                        goto err_disable_irq;
        }

        return 0;

err_disable_irq:
        rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
        rmi_free_function_list(rmi_dev);
err:
        return retval;
}
static struct rmi_driver rmi_physical_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "rmi4_physical",
                .bus = &rmi_bus_type,
                .probe = rmi_driver_probe,
                .remove = rmi_driver_remove,
        },
        .reset_handler = rmi_driver_reset_handler,
        .clear_irq_bits = rmi_driver_clear_irq_bits,
        .set_irq_bits = rmi_driver_set_irq_bits,
        .set_input_params = rmi_driver_set_input_params,
};

bool rmi_is_physical_driver(struct device_driver *drv)
{
        return drv == &rmi_physical_driver.driver;
}

int __init rmi_register_physical_driver(void)
{
        int error;

        error = driver_register(&rmi_physical_driver.driver);
        if (error) {
                pr_err("%s: driver register failed, code=%d.\n", __func__,
                       error);
                return error;
        }

        return 0;
}

void __exit rmi_unregister_physical_driver(void)
{
        driver_unregister(&rmi_physical_driver.driver);
}