// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
#include <linux/sort.h>
#include <linux/atomic.h>
#include <linux/pinctrl/qcom-pinctrl.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/sched.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/regulator/consumer.h>
#include <linux/debugfs.h>
#include <linux/wait.h>
#include <linux/time.h>

#include "qts_core.h"

static struct qts_data_entries *qts_data_entries;

struct drm_panel *active_panel;

static void qts_trusted_touch_abort_handler(struct qts_data *qts_data, int error);
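/*
 * Helpers for building the Gunyah resource-manager descriptors used by
 * trusted touch: an ACL granting the target VM read/write access and an
 * SG list describing the touch controller's I/O memory regions.
 */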
static struct gh_acl_desc *qts_vm_get_acl(enum gh_vm_names vm_name)
{
	struct gh_acl_desc *acl_desc;
	gh_vmid_t vmid;

#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	ghd_rm_get_vmid(vm_name, &vmid);
#else
	gh_rm_get_vmid(vm_name, &vmid);
#endif

	acl_desc = kzalloc(offsetof(struct gh_acl_desc, acl_entries[1]),
			GFP_KERNEL);
	if (!acl_desc)
		return ERR_PTR(-ENOMEM);

	acl_desc->n_acl_entries = 1;
	acl_desc->acl_entries[0].vmid = vmid;
	acl_desc->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	return acl_desc;
}
static struct gh_sgl_desc *qts_vm_get_sgl(struct trusted_touch_vm_info *vm_info)
{
	struct gh_sgl_desc *sgl_desc;
	int i;

	sgl_desc = kzalloc(offsetof(struct gh_sgl_desc,
			sgl_entries[vm_info->iomem_list_size]), GFP_KERNEL);
	if (!sgl_desc)
		return ERR_PTR(-ENOMEM);

	sgl_desc->n_sgl_entries = vm_info->iomem_list_size;
	for (i = 0; i < vm_info->iomem_list_size; i++) {
		sgl_desc->sgl_entries[i].ipa_base = vm_info->iomem_bases[i];
		sgl_desc->sgl_entries[i].size = vm_info->iomem_sizes[i];
	}

	return sgl_desc;
}
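/*
 * Build the list of I/O memory regions to be lent to the trusted VM from
 * device tree: the GPIO pin registers named in "qts,trusted-touch-vm-gpio-list"
 * followed by the raw ranges in "qts,trusted-touch-io-bases"/"-io-sizes".
 */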
static int qts_populate_vm_info_iomem(struct qts_data *qts_data)
{
	int i, gpio, rc = 0;
	int num_regs, num_sizes, num_gpios, list_size;
	struct resource res;
	struct device_node *np = qts_data->dev->of_node;
	struct trusted_touch_vm_info *vm_info = qts_data->vm_info;

	num_regs = of_property_count_u32_elems(np, "qts,trusted-touch-io-bases");
	if (num_regs < 0) {
		pr_err("Invalid number of IO regions specified\n");
		return -EINVAL;
	}

	num_sizes = of_property_count_u32_elems(np, "qts,trusted-touch-io-sizes");
	if (num_sizes < 0) {
		pr_err("Invalid number of IO sizes specified\n");
		return -EINVAL;
	}

	if (num_regs != num_sizes) {
		pr_err("IO bases and sizes array lengths mismatch\n");
		return -EINVAL;
	}

	num_gpios = of_gpio_named_count(np, "qts,trusted-touch-vm-gpio-list");
	if (num_gpios < 0) {
		pr_warn("Ignoring invalid trusted gpio list: %d\n", num_gpios);
		num_gpios = 0;
	}

	list_size = num_regs + num_gpios;
	vm_info->iomem_list_size = list_size;

	vm_info->iomem_bases = devm_kcalloc(qts_data->dev, list_size,
			sizeof(*vm_info->iomem_bases), GFP_KERNEL);
	if (!vm_info->iomem_bases)
		return -ENOMEM;

	vm_info->iomem_sizes = devm_kcalloc(qts_data->dev, list_size,
			sizeof(*vm_info->iomem_sizes), GFP_KERNEL);
	if (!vm_info->iomem_sizes)
		return -ENOMEM;

	for (i = 0; i < num_gpios; ++i) {
		gpio = of_get_named_gpio(np, "qts,trusted-touch-vm-gpio-list", i);
		if (gpio < 0 || !gpio_is_valid(gpio)) {
			pr_err("Invalid gpio %d at position %d\n", gpio, i);
			return gpio;
		}

		if (!msm_gpio_get_pin_address(gpio, &res)) {
			pr_err("Failed to retrieve gpio-%d resource\n", gpio);
			return -ENODATA;
		}

		vm_info->iomem_bases[i] = res.start;
		vm_info->iomem_sizes[i] = resource_size(&res);
	}

	rc = of_property_read_u32_array(np, "qts,trusted-touch-io-bases",
			&vm_info->iomem_bases[i], list_size - i);
	if (rc) {
		pr_err("Failed to read trusted touch io bases:%d\n", rc);
		return rc;
	}

	rc = of_property_read_u32_array(np, "qts,trusted-touch-io-sizes",
			&vm_info->iomem_sizes[i], list_size - i);
	if (rc) {
		pr_err("Failed to read trusted touch io sizes:%d\n", rc);
		return rc;
	}

	return 0;
}
static int qts_populate_vm_info(struct qts_data *qts_data)
{
	int rc;
	struct trusted_touch_vm_info *vm_info;
	struct device_node *np = qts_data->dev->of_node;

	vm_info = devm_kzalloc(qts_data->dev, sizeof(struct trusted_touch_vm_info), GFP_KERNEL);
	if (!vm_info)
		return -ENOMEM;

	qts_data->vm_info = vm_info;
	vm_info->vm_name = GH_TRUSTED_VM;

	rc = of_property_read_u32(np, "qts,trusted-touch-spi-irq", &vm_info->hw_irq);
	if (rc) {
		pr_err("Failed to read trusted touch SPI irq:%d\n", rc);
		return rc;
	}

	rc = qts_populate_vm_info_iomem(qts_data);
	if (rc) {
		pr_err("Failed to read trusted touch mmio ranges:%d\n", rc);
		return rc;
	}

	rc = of_property_read_string(np, "qts,trusted-touch-type",
			&vm_info->trusted_touch_type);
	if (rc) {
		pr_warn("No trusted touch type selection made\n");
		vm_info->mem_tag = GH_MEM_NOTIFIER_TAG_TOUCH_PRIMARY;
		vm_info->irq_label = GH_IRQ_LABEL_TRUSTED_TOUCH_PRIMARY;
		rc = 0;
	} else if (!strcmp(vm_info->trusted_touch_type, "primary")) {
		vm_info->mem_tag = GH_MEM_NOTIFIER_TAG_TOUCH_PRIMARY;
		vm_info->irq_label = GH_IRQ_LABEL_TRUSTED_TOUCH_PRIMARY;
	} else if (!strcmp(vm_info->trusted_touch_type, "secondary")) {
		vm_info->mem_tag = GH_MEM_NOTIFIER_TAG_TOUCH_SECONDARY;
		vm_info->irq_label = GH_IRQ_LABEL_TRUSTED_TOUCH_SECONDARY;
	}

	return 0;
}
static void qts_destroy_vm_info(struct qts_data *qts_data)
{
	/* vm_info and its arrays are devm allocations, so free them via devm */
	devm_kfree(qts_data->dev, qts_data->vm_info->iomem_sizes);
	devm_kfree(qts_data->dev, qts_data->vm_info->iomem_bases);
	devm_kfree(qts_data->dev, qts_data->vm_info);
	qts_data->vm_info = NULL;
}

static void qts_vm_deinit(struct qts_data *qts_data)
{
	if (qts_data->vm_info->mem_cookie)
		gh_mem_notifier_unregister(qts_data->vm_info->mem_cookie);
	qts_destroy_vm_info(qts_data);
}
static int qts_trusted_touch_get_vm_state(struct qts_data *qts_data)
{
	return atomic_read(&qts_data->vm_info->vm_state);
}

static void qts_trusted_touch_set_vm_state(struct qts_data *qts_data,
		int state)
{
	pr_debug("state %d\n", state);
	atomic_set(&qts_data->vm_info->vm_state, state);
}
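/*
 * TVM-side implementation (CONFIG_ARCH_QTI_VM): accept the IRQ and I/O
 * memory lent by the primary VM, service the touch interrupt locally while
 * the trusted UI session is active, and release everything on teardown or
 * abort.
 */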
#ifdef CONFIG_ARCH_QTI_VM
static int qts_vm_mem_release(struct qts_data *qts_data);
static void qts_trusted_touch_tvm_vm_mode_disable(struct qts_data *qts_data);
static void qts_trusted_touch_abort_tvm(struct qts_data *qts_data);
static void qts_trusted_touch_event_notify(struct qts_data *qts_data, int event);

static void qts_irq_enable(struct qts_data *qts_data, bool en)
{
	if (en) {
		if (qts_data->irq_disabled) {
			pr_debug("qts irq enable\n");
			enable_irq(qts_data->irq);
			qts_data->irq_disabled = false;
		}
	} else {
		if (!qts_data->irq_disabled) {
			pr_debug("qts irq disable\n");
			disable_irq_nosync(qts_data->irq);
			qts_data->irq_disabled = true;
		}
	}
}

static irqreturn_t qts_irq_handler(int irq, void *data)
{
	struct qts_data *qts_data = data;

	if (!mutex_trylock(&qts_data->transition_lock))
		return IRQ_HANDLED;

	qts_data->vendor_ops.irq_handler(irq, qts_data->vendor_data);
	mutex_unlock(&qts_data->transition_lock);

	return IRQ_HANDLED;
}

static int qts_irq_registration(struct qts_data *qts_data)
{
	int ret = 0;

	qts_data->irq_gpio_flags = IRQF_TRIGGER_RISING;
	pr_debug("irq:%d, flag:%x\n", qts_data->irq, qts_data->irq_gpio_flags);
	ret = request_threaded_irq(qts_data->irq, NULL, qts_irq_handler,
			qts_data->irq_gpio_flags | IRQF_ONESHOT,
			QTS_NAME, qts_data);
	if (ret != 0)
		pr_err("request_threaded_irq failed\n");
	if (ret == 0)
		qts_irq_enable(qts_data, false);

	return ret;
}
void qts_trusted_touch_tvm_i2c_failure_report(struct qts_data *qts_data)
{
	pr_warn("initiating trusted touch abort due to i2c failure\n");
	qts_trusted_touch_abort_handler(qts_data, TRUSTED_TOUCH_EVENT_I2C_FAILURE);
}

static void qts_trusted_touch_reset_gpio_toggle(struct qts_data *qts_data)
{
	void __iomem *base;

	if (qts_data->bus_type != QTS_BUS_TYPE_I2C)
		return;

	base = ioremap(TOUCH_RESET_GPIO_BASE, TOUCH_RESET_GPIO_SIZE);
	writel_relaxed(0x1, base + TOUCH_RESET_GPIO_OFFSET);
	/* wait until the toggle finishes */
	wmb();
	writel_relaxed(0x0, base + TOUCH_RESET_GPIO_OFFSET);
	/* wait until the toggle finishes */
	wmb();
	iounmap(base);
}

static void qts_trusted_touch_intr_gpio_toggle(struct qts_data *qts_data,
		bool enable)
{
	void __iomem *base;
	u32 val;

	if (qts_data->bus_type != QTS_BUS_TYPE_I2C)
		return;

	base = ioremap(TOUCH_INTR_GPIO_BASE, TOUCH_INTR_GPIO_SIZE);
	val = readl_relaxed(base + TOUCH_INTR_GPIO_OFFSET);
	if (enable) {
		val |= BIT(0);
		writel_relaxed(val, base + TOUCH_INTR_GPIO_OFFSET);
		/* wait until the toggle finishes */
		wmb();
	} else {
		val &= ~BIT(0);
		writel_relaxed(val, base + TOUCH_INTR_GPIO_OFFSET);
		/* wait until the toggle finishes */
		wmb();
	}
	iounmap(base);
}
static int qts_sgl_cmp(const void *a, const void *b)
{
	const struct gh_sgl_entry *left = a;
	const struct gh_sgl_entry *right = b;

	if (left->ipa_base < right->ipa_base)
		return -1;
	if (left->ipa_base > right->ipa_base)
		return 1;
	return 0;
}

static int qts_vm_compare_sgl_desc(struct gh_sgl_desc *expected,
		struct gh_sgl_desc *received)
{
	int idx;

	if (expected->n_sgl_entries != received->n_sgl_entries)
		return -E2BIG;

	sort(received->sgl_entries, received->n_sgl_entries,
			sizeof(received->sgl_entries[0]), qts_sgl_cmp, NULL);
	sort(expected->sgl_entries, expected->n_sgl_entries,
			sizeof(expected->sgl_entries[0]), qts_sgl_cmp, NULL);

	for (idx = 0; idx < expected->n_sgl_entries; idx++) {
		struct gh_sgl_entry *left = &expected->sgl_entries[idx];
		struct gh_sgl_entry *right = &received->sgl_entries[idx];

		if ((left->ipa_base != right->ipa_base) ||
				(left->size != right->size)) {
			pr_err("sgl mismatch: left base:%llx right base:%llx left size:%llx right size:%llx\n",
					left->ipa_base, right->ipa_base,
					left->size, right->size);
			return -EINVAL;
		}
	}
	return 0;
}
static int qts_vm_handle_vm_hardware(struct qts_data *qts_data)
{
	int rc = 0;

	if (atomic_read(&qts_data->delayed_tvm_probe_pending)) {
		rc = qts_irq_registration(qts_data);
		if (rc) {
			pr_err("irq registration failure on TVM!\n");
			return rc;
		}
		atomic_set(&qts_data->delayed_tvm_probe_pending, 0);
	}

	qts_irq_enable(qts_data, true);
	qts_trusted_touch_set_vm_state(qts_data, TVM_INTERRUPT_ENABLED);

	return rc;
}
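/*
 * Enable trusted touch on the TVM: accept the lent I/O memory, take an
 * I2C/SPI bus runtime-PM reference, verify the accepted SG list against the
 * one expected from device tree, then accept and enable the lent IRQ.
 * Any failure funnels into the abort handler.
 */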
static void qts_trusted_touch_tvm_vm_mode_enable(struct qts_data *qts_data)
{
	struct gh_sgl_desc *sgl_desc, *expected_sgl_desc;
	struct gh_acl_desc *acl_desc;
	struct irq_data *irq_data;
	int rc = 0;
	int irq = 0;

	mutex_lock(&qts_data->transition_lock);
	if (qts_trusted_touch_get_vm_state(qts_data) != TVM_ALL_RESOURCES_LENT_NOTIFIED) {
		pr_info("All lend notifications not received\n");
		qts_trusted_touch_event_notify(qts_data,
				TRUSTED_TOUCH_EVENT_NOTIFICATIONS_PENDING);
		mutex_unlock(&qts_data->transition_lock);
		return;
	}

	if (qts_data->vendor_ops.pre_le_tui_enable)
		qts_data->vendor_ops.pre_le_tui_enable(qts_data->vendor_data);

	acl_desc = qts_vm_get_acl(GH_TRUSTED_VM);
	if (IS_ERR(acl_desc)) {
		pr_err("failed to populate acl data:rc=%ld\n", PTR_ERR(acl_desc));
		goto accept_fail;
	}

	sgl_desc = gh_rm_mem_accept(qts_data->vm_info->vm_mem_handle,
			GH_RM_MEM_TYPE_IO,
			GH_RM_TRANS_TYPE_LEND,
			GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
			GH_RM_MEM_ACCEPT_VALIDATE_LABEL |
			GH_RM_MEM_ACCEPT_DONE, TRUSTED_TOUCH_MEM_LABEL,
			acl_desc, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(sgl_desc)) {
		pr_err("failed to do mem accept :rc=%ld\n", PTR_ERR(sgl_desc));
		goto acl_fail;
	}
	qts_trusted_touch_set_vm_state(qts_data, TVM_IOMEM_ACCEPTED);

	/* Initiate session on tvm */
	if (qts_data->bus_type == QTS_BUS_TYPE_I2C)
		rc = pm_runtime_get_sync(qts_data->client->adapter->dev.parent);
	else
		rc = pm_runtime_get_sync(qts_data->spi->master->dev.parent);

	if (rc < 0) {
		pr_err("failed to get sync rc:%d\n", rc);
		goto sgl_fail;
	}
	qts_trusted_touch_set_vm_state(qts_data, TVM_I2C_SESSION_ACQUIRED);

	expected_sgl_desc = qts_vm_get_sgl(qts_data->vm_info);
	if (IS_ERR(expected_sgl_desc)) {
		pr_err("failed to build expected sg list:rc=%ld\n",
				PTR_ERR(expected_sgl_desc));
		goto sgl_fail;
	}

	if (qts_vm_compare_sgl_desc(expected_sgl_desc, sgl_desc)) {
		pr_err("IO sg list does not match\n");
		goto sgl_cmp_fail;
	}

	kfree(expected_sgl_desc);
	kfree(acl_desc);
	kfree(sgl_desc);

	irq = gh_irq_accept(qts_data->vm_info->irq_label, -1, IRQ_TYPE_EDGE_RISING);
	qts_trusted_touch_intr_gpio_toggle(qts_data, false);
	if (irq < 0) {
		pr_err("failed to accept irq\n");
		goto accept_fail;
	}
	qts_trusted_touch_set_vm_state(qts_data, TVM_IRQ_ACCEPTED);

	irq_data = irq_get_irq_data(irq);
	if (!irq_data) {
		pr_err("Invalid irq data for trusted touch\n");
		goto accept_fail;
	}
	if (!irq_data->hwirq) {
		pr_err("Invalid irq in irq data\n");
		goto accept_fail;
	}
	if (irq_data->hwirq != qts_data->vm_info->hw_irq) {
		pr_err("Invalid irq lent\n");
		goto accept_fail;
	}

	pr_debug("irq:returned from accept:%d\n", irq);
	qts_data->irq = irq;

	rc = qts_vm_handle_vm_hardware(qts_data);
	if (rc) {
		pr_err("Delayed probe failure on TVM!\n");
		goto accept_fail;
	}
	atomic_set(&qts_data->trusted_touch_enabled, 1);

	if (qts_data->vendor_ops.post_le_tui_enable)
		qts_data->vendor_ops.post_le_tui_enable(qts_data->vendor_data);

	pr_info("Irq, iomem are accepted and trusted touch enabled\n");
	mutex_unlock(&qts_data->transition_lock);
	return;

sgl_cmp_fail:
	kfree(expected_sgl_desc);
sgl_fail:
	kfree(sgl_desc);
acl_fail:
	kfree(acl_desc);
accept_fail:
	qts_trusted_touch_abort_handler(qts_data,
			TRUSTED_TOUCH_EVENT_ACCEPT_FAILURE);
	mutex_unlock(&qts_data->transition_lock);
}
static void qts_vm_irq_on_lend_callback(void *data,
		unsigned long notif_type,
		enum gh_irq_label label)
{
	struct qts_data *qts_data = data;

	pr_debug("received irq lend request for label:%d\n", label);
	if (qts_trusted_touch_get_vm_state(qts_data) == TVM_IOMEM_LENT_NOTIFIED)
		qts_trusted_touch_set_vm_state(qts_data, TVM_ALL_RESOURCES_LENT_NOTIFIED);
	else
		qts_trusted_touch_set_vm_state(qts_data, TVM_IRQ_LENT_NOTIFIED);
}

static void qts_vm_mem_on_lend_handler(enum gh_mem_notifier_tag tag,
		unsigned long notif_type, void *entry_data, void *notif_msg)
{
	struct gh_rm_notif_mem_shared_payload *payload;
	struct trusted_touch_vm_info *vm_info;
	struct qts_data *qts_data;

	if (!entry_data || !notif_msg) {
		pr_err("Invalid entry data passed from rm\n");
		return;
	}

	qts_data = (struct qts_data *)entry_data;
	vm_info = qts_data->vm_info;
	if (!vm_info) {
		pr_err("Invalid vm_info\n");
		return;
	}

	if (notif_type != GH_RM_NOTIF_MEM_SHARED ||
			tag != vm_info->mem_tag) {
		pr_err("Invalid command passed from rm\n");
		return;
	}

	payload = (struct gh_rm_notif_mem_shared_payload *)notif_msg;
	if (payload->trans_type != GH_RM_TRANS_TYPE_LEND ||
			payload->label != TRUSTED_TOUCH_MEM_LABEL) {
		pr_err("Invalid label or transaction type\n");
		return;
	}

	vm_info->vm_mem_handle = payload->mem_handle;
	pr_debug("received mem lend request with handle:%d\n", vm_info->vm_mem_handle);

	if (qts_trusted_touch_get_vm_state(qts_data) == TVM_IRQ_LENT_NOTIFIED)
		qts_trusted_touch_set_vm_state(qts_data, TVM_ALL_RESOURCES_LENT_NOTIFIED);
	else
		qts_trusted_touch_set_vm_state(qts_data, TVM_IOMEM_LENT_NOTIFIED);
}
static int qts_vm_mem_release(struct qts_data *qts_data)
{
	int rc = 0;

	if (!qts_data->vm_info->vm_mem_handle) {
		pr_err("Invalid memory handle\n");
		return -EINVAL;
	}

	rc = gh_rm_mem_release(qts_data->vm_info->vm_mem_handle, 0);
	if (rc)
		pr_err("VM mem release failed: rc=%d\n", rc);

	rc = gh_rm_mem_notify(qts_data->vm_info->vm_mem_handle,
			GH_RM_MEM_NOTIFY_OWNER_RELEASED,
			qts_data->vm_info->mem_tag, 0);
	if (rc)
		pr_err("Failed to notify mem release to PVM: rc=%d\n", rc);

	pr_debug("vm mem release success\n");
	qts_data->vm_info->vm_mem_handle = 0;
	return rc;
}
static void qts_trusted_touch_tvm_vm_mode_disable(struct qts_data *qts_data)
{
	int rc = 0;

	mutex_lock(&qts_data->transition_lock);
	if (atomic_read(&qts_data->trusted_touch_abort_status)) {
		qts_trusted_touch_abort_tvm(qts_data);
		mutex_unlock(&qts_data->transition_lock);
		return;
	}

	if (qts_data->vendor_ops.pre_le_tui_disable)
		qts_data->vendor_ops.pre_le_tui_disable(qts_data->vendor_data);

	qts_irq_enable(qts_data, false);
	qts_trusted_touch_set_vm_state(qts_data, TVM_INTERRUPT_DISABLED);

	rc = gh_irq_release(qts_data->vm_info->irq_label);
	if (rc) {
		pr_err("Failed to release irq rc:%d\n", rc);
		goto error;
	} else {
		qts_trusted_touch_set_vm_state(qts_data, TVM_IRQ_RELEASED);
	}

	rc = gh_irq_release_notify(qts_data->vm_info->irq_label);
	if (rc)
		pr_err("Failed to notify release irq rc:%d\n", rc);

	pr_debug("vm irq release success\n");

	if (qts_data->bus_type == QTS_BUS_TYPE_I2C)
		pm_runtime_put_sync(qts_data->client->adapter->dev.parent);
	else
		pm_runtime_put_sync(qts_data->spi->master->dev.parent);

	qts_trusted_touch_set_vm_state(qts_data, TVM_I2C_SESSION_RELEASED);

	rc = qts_vm_mem_release(qts_data);
	if (rc) {
		pr_err("Failed to release mem rc:%d\n", rc);
		goto error;
	} else {
		qts_trusted_touch_set_vm_state(qts_data, TVM_IOMEM_RELEASED);
	}

	qts_trusted_touch_set_vm_state(qts_data, TRUSTED_TOUCH_TVM_INIT);
	atomic_set(&qts_data->trusted_touch_enabled, 0);

	if (qts_data->vendor_ops.post_le_tui_disable)
		qts_data->vendor_ops.post_le_tui_disable(qts_data->vendor_data);

	pr_info("Irq, iomem are released and trusted touch disabled\n");
	mutex_unlock(&qts_data->transition_lock);
	return;

error:
	qts_trusted_touch_abort_handler(qts_data,
			TRUSTED_TOUCH_EVENT_RELEASE_FAILURE);
	mutex_unlock(&qts_data->transition_lock);
}
static int qts_handle_trusted_touch_tvm(struct qts_data *qts_data, int value)
{
	int err = 0;

	switch (value) {
	case 0:
		if ((atomic_read(&qts_data->trusted_touch_enabled) == 0) &&
				(atomic_read(&qts_data->trusted_touch_abort_status) == 0)) {
			pr_err("Trusted touch is already disabled\n");
			break;
		}
		if (atomic_read(&qts_data->trusted_touch_mode) ==
				TRUSTED_TOUCH_VM_MODE) {
			qts_trusted_touch_tvm_vm_mode_disable(qts_data);
		} else {
			pr_err("Unsupported trusted touch mode\n");
		}
		break;

	case 1:
		if (atomic_read(&qts_data->trusted_touch_enabled)) {
			pr_err("Trusted touch usecase underway\n");
			err = -EBUSY;
			break;
		}
		if (atomic_read(&qts_data->trusted_touch_mode) ==
				TRUSTED_TOUCH_VM_MODE) {
			qts_trusted_touch_tvm_vm_mode_enable(qts_data);
		} else {
			pr_err("Unsupported trusted touch mode\n");
		}
		break;

	default:
		pr_err("unsupported value: %d\n", value);
		err = -EINVAL;
		break;
	}

	return err;
}
static void qts_trusted_touch_abort_tvm(struct qts_data *qts_data)
{
	int rc = 0;
	int vm_state = qts_trusted_touch_get_vm_state(qts_data);

	if (vm_state >= TRUSTED_TOUCH_TVM_STATE_MAX) {
		pr_err("invalid tvm driver state: %d\n", vm_state);
		return;
	}

	switch (vm_state) {
	case TVM_INTERRUPT_ENABLED:
		qts_irq_enable(qts_data, false);
		fallthrough;
	case TVM_IRQ_ACCEPTED:
	case TVM_INTERRUPT_DISABLED:
		rc = gh_irq_release(qts_data->vm_info->irq_label);
		if (rc)
			pr_err("Failed to release irq rc:%d\n", rc);
		rc = gh_irq_release_notify(qts_data->vm_info->irq_label);
		if (rc)
			pr_err("Failed to notify irq release rc:%d\n", rc);
		fallthrough;
	case TVM_I2C_SESSION_ACQUIRED:
	case TVM_IOMEM_ACCEPTED:
	case TVM_IRQ_RELEASED:
		if (qts_data->bus_type == QTS_BUS_TYPE_I2C)
			pm_runtime_put_sync(qts_data->client->adapter->dev.parent);
		else
			pm_runtime_put_sync(qts_data->spi->master->dev.parent);
		fallthrough;
	case TVM_I2C_SESSION_RELEASED:
		rc = qts_vm_mem_release(qts_data);
		if (rc)
			pr_err("Failed to release mem rc:%d\n", rc);
		fallthrough;
	case TVM_IOMEM_RELEASED:
	case TVM_ALL_RESOURCES_LENT_NOTIFIED:
	case TRUSTED_TOUCH_TVM_INIT:
	case TVM_IRQ_LENT_NOTIFIED:
	case TVM_IOMEM_LENT_NOTIFIED:
		atomic_set(&qts_data->trusted_touch_enabled, 0);
	}

	atomic_set(&qts_data->trusted_touch_abort_status, 0);
	qts_trusted_touch_set_vm_state(qts_data, TRUSTED_TOUCH_TVM_INIT);
}
#else
static void qts_bus_put(struct qts_data *qts_data);
static int qts_enable_reg(struct qts_data *qts_data, bool enable);
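/*
 * PVM-side implementation: lend the touch IRQ and I/O memory to the trusted
 * VM when a TUI session starts, and reclaim them (or abort from whatever
 * state was reached) when the session ends or fails.
 */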
static void qts_trusted_touch_abort_pvm(struct qts_data *qts_data)
{
	int rc = 0;
	int vm_state = qts_trusted_touch_get_vm_state(qts_data);

	if (vm_state >= TRUSTED_TOUCH_PVM_STATE_MAX) {
		pr_err("Invalid driver state: %d\n", vm_state);
		return;
	}

	switch (vm_state) {
	case PVM_IRQ_RELEASE_NOTIFIED:
	case PVM_ALL_RESOURCES_RELEASE_NOTIFIED:
	case PVM_IRQ_LENT:
	case PVM_IRQ_LENT_NOTIFIED:
		rc = gh_irq_reclaim(qts_data->vm_info->irq_label);
		if (rc) {
			pr_err("failed to reclaim irq on pvm rc:%d\n", rc);
			return;
		}
		fallthrough;
	case PVM_IRQ_RECLAIMED:
	case PVM_IOMEM_LENT:
	case PVM_IOMEM_LENT_NOTIFIED:
	case PVM_IOMEM_RELEASE_NOTIFIED:
#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
		rc = ghd_rm_mem_reclaim(qts_data->vm_info->vm_mem_handle, 0);
#else
		rc = gh_rm_mem_reclaim(qts_data->vm_info->vm_mem_handle, 0);
#endif
		if (rc) {
			pr_err("failed to reclaim iomem on pvm rc:%d\n", rc);
			qts_trusted_touch_set_vm_state(qts_data, PVM_IOMEM_RELEASE_NOTIFIED);
			return;
		}
		qts_data->vm_info->vm_mem_handle = 0;
		fallthrough;
	case PVM_IOMEM_RECLAIMED:
	case PVM_INTERRUPT_DISABLED:
		if (qts_data->vendor_ops.enable_touch_irq)
			qts_data->vendor_ops.enable_touch_irq(qts_data->vendor_data, true);
		fallthrough;
	case PVM_I2C_RESOURCE_ACQUIRED:
	case PVM_INTERRUPT_ENABLED:
		qts_bus_put(qts_data);
		fallthrough;
	case TRUSTED_TOUCH_PVM_INIT:
	case PVM_I2C_RESOURCE_RELEASED:
		atomic_set(&qts_data->trusted_touch_enabled, 0);
		atomic_set(&qts_data->trusted_touch_transition, 0);
	}

	atomic_set(&qts_data->trusted_touch_abort_status, 0);
	qts_trusted_touch_set_vm_state(qts_data, TRUSTED_TOUCH_PVM_INIT);
}
static int qts_clk_prepare_enable(struct qts_data *qts_data)
{
	int ret;

	ret = clk_prepare_enable(qts_data->iface_clk);
	if (ret) {
		pr_err("error on clk_prepare_enable(iface_clk):%d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(qts_data->core_clk);
	if (ret) {
		clk_disable_unprepare(qts_data->iface_clk);
		pr_err("error clk_prepare_enable(core_clk):%d\n", ret);
	}
	return ret;
}

static void qts_clk_disable_unprepare(struct qts_data *qts_data)
{
	clk_disable_unprepare(qts_data->core_clk);
	clk_disable_unprepare(qts_data->iface_clk);
}
static int qts_bus_get(struct qts_data *qts_data)
{
	int rc = 0;
	struct device *dev = NULL;

	if (qts_data->schedule_suspend)
		cancel_work_sync(&qts_data->suspend_work);
	if (qts_data->schedule_resume)
		cancel_work_sync(&qts_data->resume_work);

	reinit_completion(&qts_data->trusted_touch_powerdown);
	qts_enable_reg(qts_data, true);

	if (qts_data->bus_type == QTS_BUS_TYPE_I2C)
		dev = qts_data->client->adapter->dev.parent;
	else
		dev = qts_data->spi->master->dev.parent;

	mutex_lock(&qts_data->qts_clk_io_ctrl_mutex);
	rc = pm_runtime_get_sync(dev);
	if (rc >= 0 && qts_data->core_clk != NULL &&
			qts_data->iface_clk != NULL) {
		rc = qts_clk_prepare_enable(qts_data);
		if (rc)
			pm_runtime_put_sync(dev);
	}
	mutex_unlock(&qts_data->qts_clk_io_ctrl_mutex);
	return rc;
}

static void qts_bus_put(struct qts_data *qts_data)
{
	struct device *dev = NULL;

	if (qts_data->bus_type == QTS_BUS_TYPE_I2C)
		dev = qts_data->client->adapter->dev.parent;
	else
		dev = qts_data->spi->master->dev.parent;

	mutex_lock(&qts_data->qts_clk_io_ctrl_mutex);
	if (qts_data->core_clk != NULL && qts_data->iface_clk != NULL)
		qts_clk_disable_unprepare(qts_data);
	pm_runtime_put_sync(dev);
	mutex_unlock(&qts_data->qts_clk_io_ctrl_mutex);

	complete(&qts_data->trusted_touch_powerdown);
	qts_enable_reg(qts_data, false);
}
static struct gh_notify_vmid_desc *qts_vm_get_vmid(gh_vmid_t vmid)
{
	struct gh_notify_vmid_desc *vmid_desc;

	vmid_desc = kzalloc(offsetof(struct gh_notify_vmid_desc,
			vmid_entries[1]), GFP_KERNEL);
	if (!vmid_desc)
		return ERR_PTR(-ENOMEM);

	vmid_desc->n_vmid_entries = 1;
	vmid_desc->vmid_entries[0].vmid = vmid;

	return vmid_desc;
}
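/*
 * Disable trusted touch on the PVM: reclaim the lent I/O memory and IRQ,
 * re-enable the local touch interrupt and drop the bus/regulator references
 * taken when the session was enabled.
 */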
static void qts_trusted_touch_pvm_vm_mode_disable(struct qts_data *qts_data)
{
	int rc = 0;

	atomic_set(&qts_data->trusted_touch_transition, 1);

	if (atomic_read(&qts_data->trusted_touch_abort_status)) {
		qts_trusted_touch_abort_pvm(qts_data);
		return;
	}

	if (qts_trusted_touch_get_vm_state(qts_data) != PVM_ALL_RESOURCES_RELEASE_NOTIFIED)
		pr_info("all release notifications are not received yet\n");

	if (qts_data->vendor_ops.pre_la_tui_disable)
		qts_data->vendor_ops.pre_la_tui_disable(qts_data->vendor_data);

#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	rc = ghd_rm_mem_reclaim(qts_data->vm_info->vm_mem_handle, 0);
#else
	rc = gh_rm_mem_reclaim(qts_data->vm_info->vm_mem_handle, 0);
#endif
	if (rc) {
		pr_err("Trusted touch VM mem reclaim failed rc:%d\n", rc);
		goto error;
	}
	qts_trusted_touch_set_vm_state(qts_data, PVM_IOMEM_RECLAIMED);
	qts_data->vm_info->vm_mem_handle = 0;
	pr_debug("vm mem reclaim success!\n");

	rc = gh_irq_reclaim(qts_data->vm_info->irq_label);
	if (rc) {
		pr_err("failed to reclaim irq on pvm rc:%d\n", rc);
		goto error;
	}
	qts_trusted_touch_set_vm_state(qts_data, PVM_IRQ_RECLAIMED);
	pr_debug("vm irq reclaim success!\n");

	if (qts_data->vendor_ops.enable_touch_irq)
		qts_data->vendor_ops.enable_touch_irq(qts_data->vendor_data, true);
	qts_trusted_touch_set_vm_state(qts_data, PVM_INTERRUPT_ENABLED);

	qts_bus_put(qts_data);
	atomic_set(&qts_data->trusted_touch_transition, 0);
	qts_trusted_touch_set_vm_state(qts_data, PVM_I2C_RESOURCE_RELEASED);
	qts_trusted_touch_set_vm_state(qts_data, TRUSTED_TOUCH_PVM_INIT);
	atomic_set(&qts_data->trusted_touch_enabled, 0);

	if (qts_data->vendor_ops.post_la_tui_disable)
		qts_data->vendor_ops.post_la_tui_disable(qts_data->vendor_data);

	pr_info("Irq, iomem are reclaimed and trusted touch disabled\n");
	return;

error:
	qts_trusted_touch_abort_handler(qts_data,
			TRUSTED_TOUCH_EVENT_RECLAIM_FAILURE);
}
static void qts_vm_irq_on_release_callback(void *data,
		unsigned long notif_type,
		enum gh_irq_label label)
{
	struct qts_data *qts_data = data;

	if (notif_type != GH_RM_NOTIF_VM_IRQ_RELEASED) {
		pr_err("invalid notification type\n");
		return;
	}

	if (qts_trusted_touch_get_vm_state(qts_data) == PVM_IOMEM_RELEASE_NOTIFIED)
		qts_trusted_touch_set_vm_state(qts_data, PVM_ALL_RESOURCES_RELEASE_NOTIFIED);
	else
		qts_trusted_touch_set_vm_state(qts_data, PVM_IRQ_RELEASE_NOTIFIED);
}

static void qts_vm_mem_on_release_handler(enum gh_mem_notifier_tag tag,
		unsigned long notif_type, void *entry_data, void *notif_msg)
{
	struct gh_rm_notif_mem_released_payload *release_payload;
	struct trusted_touch_vm_info *vm_info;
	struct qts_data *qts_data;

	if (!entry_data || !notif_msg) {
		pr_err("Invalid data or notification message\n");
		return;
	}

	qts_data = (struct qts_data *)entry_data;
	vm_info = qts_data->vm_info;
	if (!vm_info) {
		pr_err("Invalid vm_info\n");
		return;
	}

	if (notif_type != GH_RM_NOTIF_MEM_RELEASED) {
		pr_err("Invalid notification type\n");
		return;
	}

	if (tag != vm_info->mem_tag) {
		pr_err("Invalid tag\n");
		return;
	}

	release_payload = (struct gh_rm_notif_mem_released_payload *)notif_msg;
	if (release_payload->mem_handle != vm_info->vm_mem_handle) {
		pr_err("Invalid mem handle detected\n");
		return;
	}

	if (qts_trusted_touch_get_vm_state(qts_data) == PVM_IRQ_RELEASE_NOTIFIED)
		qts_trusted_touch_set_vm_state(qts_data, PVM_ALL_RESOURCES_RELEASE_NOTIFIED);
	else
		qts_trusted_touch_set_vm_state(qts_data, PVM_IOMEM_RELEASE_NOTIFIED);
}
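/*
 * Lend the touch controller's I/O memory to the trusted VM and notify the
 * recipient through the resource manager so the TVM can accept it.
 */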
static int qts_vm_mem_lend(struct qts_data *qts_data)
{
	struct gh_acl_desc *acl_desc;
	struct gh_sgl_desc *sgl_desc;
	struct gh_notify_vmid_desc *vmid_desc;
	gh_memparcel_handle_t mem_handle;
	gh_vmid_t trusted_vmid;
	int rc = 0;

	acl_desc = qts_vm_get_acl(GH_TRUSTED_VM);
	if (IS_ERR(acl_desc)) {
		pr_err("Failed to get acl of IO memories for Trusted touch\n");
		rc = PTR_ERR(acl_desc);
		return rc;
	}

	sgl_desc = qts_vm_get_sgl(qts_data->vm_info);
	if (IS_ERR(sgl_desc)) {
		pr_err("Failed to get sgl of IO memories for Trusted touch\n");
		rc = PTR_ERR(sgl_desc);
		goto sgl_error;
	}

#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	rc = ghd_rm_mem_lend(GH_RM_MEM_TYPE_IO, 0, TRUSTED_TOUCH_MEM_LABEL,
			acl_desc, sgl_desc, NULL, &mem_handle);
#else
	rc = gh_rm_mem_lend(GH_RM_MEM_TYPE_IO, 0, TRUSTED_TOUCH_MEM_LABEL,
			acl_desc, sgl_desc, NULL, &mem_handle);
#endif
	if (rc) {
		pr_err("Failed to lend IO memories for Trusted touch rc:%d\n", rc);
		goto error;
	}

	pr_debug("vm mem lend success\n");
	qts_trusted_touch_set_vm_state(qts_data, PVM_IOMEM_LENT);

#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	ghd_rm_get_vmid(GH_TRUSTED_VM, &trusted_vmid);
#else
	gh_rm_get_vmid(GH_TRUSTED_VM, &trusted_vmid);
#endif

	vmid_desc = qts_vm_get_vmid(trusted_vmid);
	if (IS_ERR(vmid_desc)) {
		pr_err("Failed to get vmid desc for Trusted touch\n");
		rc = PTR_ERR(vmid_desc);
		goto error;
	}

	rc = gh_rm_mem_notify(mem_handle, GH_RM_MEM_NOTIFY_RECIPIENT_SHARED,
			qts_data->vm_info->mem_tag, vmid_desc);
	if (rc) {
		pr_err("Failed to notify mem lend to hypervisor rc:%d\n", rc);
		goto vmid_error;
	}

	qts_trusted_touch_set_vm_state(qts_data, PVM_IOMEM_LENT_NOTIFIED);
	qts_data->vm_info->vm_mem_handle = mem_handle;

vmid_error:
	kfree(vmid_desc);
error:
	kfree(sgl_desc);
sgl_error:
	kfree(acl_desc);
	return rc;
}
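/*
 * Enable trusted touch on the PVM: acquire the bus and regulators, mask the
 * local touch interrupt, then lend the I/O memory and IRQ to the trusted VM.
 * Any failure funnels into the abort handler so partially lent resources are
 * reclaimed.
 */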
static int qts_trusted_touch_pvm_vm_mode_enable(struct qts_data *qts_data)
{
	int rc = 0;
	struct trusted_touch_vm_info *vm_info = qts_data->vm_info;

	atomic_set(&qts_data->trusted_touch_transition, 1);
	mutex_lock(&qts_data->transition_lock);

	if (qts_data->suspended) {
		pr_err("Invalid power state for operation\n");
		atomic_set(&qts_data->trusted_touch_transition, 0);
		rc = -EPERM;
		goto error;
	}

	if (qts_data->vendor_ops.pre_la_tui_enable)
		qts_data->vendor_ops.pre_la_tui_enable(qts_data->vendor_data);

	/* i2c session start and resource acquire */
	if (qts_bus_get(qts_data) < 0) {
		pr_err("qts_bus_get failed\n");
		rc = -EIO;
		goto error;
	}

	qts_trusted_touch_set_vm_state(qts_data, PVM_I2C_RESOURCE_ACQUIRED);
	if (qts_data->vendor_ops.enable_touch_irq)
		qts_data->vendor_ops.enable_touch_irq(qts_data->vendor_data, false);
	qts_trusted_touch_set_vm_state(qts_data, PVM_INTERRUPT_DISABLED);

	rc = qts_vm_mem_lend(qts_data);
	if (rc) {
		pr_err("Failed to lend memory\n");
		goto abort_handler;
	}
	pr_debug("vm mem lend success\n");

	if (atomic_read(&qts_data->delayed_pvm_probe_pending)) {
		if (qts_data->vendor_ops.get_irq_num)
			qts_data->irq = qts_data->vendor_ops.get_irq_num(qts_data->vendor_data);
		atomic_set(&qts_data->delayed_tvm_probe_pending, 0);
	}

	rc = gh_irq_lend_v2(vm_info->irq_label, vm_info->vm_name,
			qts_data->irq, &qts_vm_irq_on_release_callback, qts_data);
	if (rc) {
		pr_err("Failed to lend irq\n");
		goto abort_handler;
	}

	pr_debug("vm irq lend success for irq:%d\n", qts_data->irq);
	qts_trusted_touch_set_vm_state(qts_data, PVM_IRQ_LENT);

	rc = gh_irq_lend_notify(vm_info->irq_label);
	if (rc) {
		pr_err("Failed to notify irq\n");
		goto abort_handler;
	}
	qts_trusted_touch_set_vm_state(qts_data, PVM_IRQ_LENT_NOTIFIED);

	if (qts_data->vendor_ops.post_la_tui_enable)
		qts_data->vendor_ops.post_la_tui_enable(qts_data->vendor_data);

	mutex_unlock(&qts_data->transition_lock);
	atomic_set(&qts_data->trusted_touch_transition, 0);
	atomic_set(&qts_data->trusted_touch_enabled, 1);
	pr_info("Irq, iomem are lent and trusted touch enabled\n");
	return rc;

abort_handler:
	qts_trusted_touch_abort_handler(qts_data, TRUSTED_TOUCH_EVENT_LEND_FAILURE);
error:
	mutex_unlock(&qts_data->transition_lock);
	return rc;
}
static int qts_handle_trusted_touch_pvm(struct qts_data *qts_data, int value)
{
	int err = 0;

	switch (value) {
	case 0:
		if (atomic_read(&qts_data->trusted_touch_enabled) == 0 &&
				(atomic_read(&qts_data->trusted_touch_abort_status) == 0)) {
			pr_err("Trusted touch is already disabled\n");
			break;
		}
		if (atomic_read(&qts_data->trusted_touch_mode) ==
				TRUSTED_TOUCH_VM_MODE) {
			qts_trusted_touch_pvm_vm_mode_disable(qts_data);
		} else {
			pr_err("Unsupported trusted touch mode\n");
		}
		break;

	case 1:
		if (atomic_read(&qts_data->trusted_touch_enabled)) {
			pr_err("Trusted touch usecase underway\n");
			err = -EBUSY;
			break;
		}
		if (atomic_read(&qts_data->trusted_touch_mode) ==
				TRUSTED_TOUCH_VM_MODE) {
			err = qts_trusted_touch_pvm_vm_mode_enable(qts_data);
		} else {
			pr_err("Unsupported trusted touch mode\n");
		}
		break;

	default:
		pr_err("unsupported value: %d\n", value);
		err = -EINVAL;
		break;
	}
	return err;
}
#endif
static void qts_trusted_touch_event_notify(struct qts_data *qts_data, int event)
{
	atomic_set(&qts_data->trusted_touch_event, event);
	sysfs_notify(&qts_data->dev->kobj, NULL, "trusted_touch_event");
}

static void qts_trusted_touch_abort_handler(struct qts_data *qts_data, int error)
{
	atomic_set(&qts_data->trusted_touch_abort_status, error);
	pr_info("TUI session aborted with failure:%d\n", error);
	qts_trusted_touch_event_notify(qts_data, error);
#ifdef CONFIG_ARCH_QTI_VM
	if (qts_trusted_touch_get_vm_state(qts_data) >= TVM_IOMEM_ACCEPTED &&
			error == TRUSTED_TOUCH_EVENT_I2C_FAILURE) {
		pr_info("Resetting touch controller\n");
		qts_trusted_touch_reset_gpio_toggle(qts_data);
	}
#endif
}
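/*
 * One-time VM setup: parse the trusted-touch device-tree properties and
 * register the Gunyah memory/IRQ notifiers appropriate for the side this
 * driver is built for (the TVM accepts lends, the PVM handles releases).
 */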
static int qts_vm_init(struct qts_data *qts_data)
{
	int rc = 0;
	struct trusted_touch_vm_info *vm_info;
	void *mem_cookie;

	rc = qts_populate_vm_info(qts_data);
	if (rc) {
		pr_err("Cannot setup vm pipeline\n");
		rc = -EINVAL;
		goto fail;
	}

	vm_info = qts_data->vm_info;
#ifdef CONFIG_ARCH_QTI_VM
	mem_cookie = gh_mem_notifier_register(vm_info->mem_tag,
			qts_vm_mem_on_lend_handler, qts_data);
	if (!mem_cookie) {
		pr_err("Failed to register on lend mem notifier\n");
		rc = -EINVAL;
		goto init_fail;
	}
	vm_info->mem_cookie = mem_cookie;
	rc = gh_irq_wait_for_lend_v2(vm_info->irq_label, GH_PRIMARY_VM,
			&qts_vm_irq_on_lend_callback, qts_data);
	qts_trusted_touch_set_vm_state(qts_data, TRUSTED_TOUCH_TVM_INIT);
#else
	mem_cookie = gh_mem_notifier_register(vm_info->mem_tag,
			qts_vm_mem_on_release_handler, qts_data);
	if (!mem_cookie) {
		pr_err("Failed to register on release mem notifier\n");
		rc = -EINVAL;
		goto init_fail;
	}
	vm_info->mem_cookie = mem_cookie;
	qts_trusted_touch_set_vm_state(qts_data, TRUSTED_TOUCH_PVM_INIT);
#endif
	return rc;

init_fail:
	qts_vm_deinit(qts_data);
fail:
	return rc;
}
static void qts_dt_parse_trusted_touch_info(struct qts_data *qts_data)
{
	struct device_node *np = qts_data->dev->of_node;
	int rc = 0;
	const char *selection;
	const char *environment = NULL;

	rc = of_property_read_string(np, "qts,trusted-touch-mode", &selection);
	if (rc) {
		pr_err("No trusted touch mode selection made\n");
		atomic_set(&qts_data->trusted_touch_mode,
				TRUSTED_TOUCH_MODE_NONE);
		return;
	}

	if (!strcmp(selection, "vm_mode")) {
		atomic_set(&qts_data->trusted_touch_mode,
				TRUSTED_TOUCH_VM_MODE);
		pr_debug("Selected trusted touch mode to VM mode\n");
	} else {
		atomic_set(&qts_data->trusted_touch_mode,
				TRUSTED_TOUCH_MODE_NONE);
		pr_err("Invalid trusted_touch mode\n");
	}

	rc = of_property_read_string(np, "qts,touch-environment",
			&environment);
	if (rc)
		pr_err("No trusted touch mode environment\n");

	qts_data->touch_environment = environment;
	qts_data->tui_supported = true;
	pr_debug("Trusted touch environment:%s\n", qts_data->touch_environment);
}
static void qts_trusted_touch_init(struct qts_data *qts_data)
{
	int rc = 0;

	atomic_set(&qts_data->trusted_touch_initialized, 0);
	qts_dt_parse_trusted_touch_info(qts_data);

	if (atomic_read(&qts_data->trusted_touch_mode) == TRUSTED_TOUCH_MODE_NONE)
		return;

	init_completion(&qts_data->trusted_touch_powerdown);

	/* Get clocks */
	qts_data->core_clk = devm_clk_get(qts_data->dev->parent, "m-ahb");
	if (IS_ERR(qts_data->core_clk)) {
		qts_data->core_clk = NULL;
		pr_err("core_clk is not defined\n");
	}

	qts_data->iface_clk = devm_clk_get(qts_data->dev->parent, "se-clk");
	if (IS_ERR(qts_data->iface_clk)) {
		qts_data->iface_clk = NULL;
		pr_err("iface_clk is not defined\n");
	}

	if (atomic_read(&qts_data->trusted_touch_mode) ==
			TRUSTED_TOUCH_VM_MODE) {
		rc = qts_vm_init(qts_data);
		if (rc)
			pr_err("Failed to init VM\n");
	}
	atomic_set(&qts_data->trusted_touch_initialized, 1);
}
static bool qts_ts_is_primary(struct kobject *kobj)
{
	char *path = NULL;
	bool primary;

	if (!kobj)
		return true;

	path = kobject_get_path(kobj, GFP_KERNEL);
	if (!path)
		return true;

	primary = strstr(path, "primary") != NULL;
	kfree(path);
	return primary;
}
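/*
 * Sysfs interface under /sys/kernel/qts/<primary|secondary>/: user space
 * starts and stops a trusted touch session through trusted_touch_enable and
 * polls trusted_touch_event for abort/failure notifications.
 */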
static ssize_t trusted_touch_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct qts_data *qts_data;
	u32 idx = qts_ts_is_primary(kobj) ? 0 : 1;

	qts_data = &qts_data_entries->info[idx];
	return scnprintf(buf, PAGE_SIZE, "%d",
			atomic_read(&qts_data->trusted_touch_enabled));
}

static ssize_t trusted_touch_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct qts_data *qts_data;
	unsigned long value;
	int err = 0;
	u32 idx = qts_ts_is_primary(kobj) ? 0 : 1;

	if (count > 2)
		return -EINVAL;

	err = kstrtoul(buf, 10, &value);
	if (err != 0)
		return err;

	qts_data = &qts_data_entries->info[idx];
	if (!atomic_read(&qts_data->trusted_touch_initialized))
		return -EIO;

	pr_info("TUI trusted_touch_enable:%lu\n", value);

#ifdef CONFIG_ARCH_QTI_VM
	err = qts_handle_trusted_touch_tvm(qts_data, value);
	if (err) {
		pr_err("Failed to handle trusted touch in tvm\n");
		return -EINVAL;
	}
#else
	err = qts_handle_trusted_touch_pvm(qts_data, value);
	if (err) {
		pr_err("Failed to handle trusted touch in pvm\n");
		return -EINVAL;
	}
#endif
	return count;
}
static ssize_t trusted_touch_event_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct qts_data *qts_data;
	u32 idx = qts_ts_is_primary(kobj) ? 0 : 1;

	qts_data = &qts_data_entries->info[idx];
	return scnprintf(buf, PAGE_SIZE, "%d",
			atomic_read(&qts_data->trusted_touch_event));
}

static ssize_t trusted_touch_event_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct qts_data *qts_data;
	unsigned long value;
	int err = 0;
	u32 idx = qts_ts_is_primary(kobj) ? 0 : 1;

	if (count > 2)
		return -EINVAL;

	err = kstrtoul(buf, 10, &value);
	if (err != 0)
		return err;

	qts_data = &qts_data_entries->info[idx];
	if (!atomic_read(&qts_data->trusted_touch_initialized))
		return -EIO;

	if (value)
		return -EIO;

	atomic_set(&qts_data->trusted_touch_event, value);
	return count;
}

static ssize_t trusted_touch_type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct qts_data *qts_data;
	u32 idx = qts_ts_is_primary(kobj) ? 0 : 1;

	qts_data = &qts_data_entries->info[idx];
	return scnprintf(buf, PAGE_SIZE, "%s", qts_data->vm_info->trusted_touch_type);
}

static ssize_t trusted_touch_device_path_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct qts_data *qts_data;
	char *path = NULL;
	ssize_t len;
	u32 idx = qts_ts_is_primary(kobj) ? 0 : 1;

	qts_data = &qts_data_entries->info[idx];
	if (qts_data && qts_data->dev)
		path = kobject_get_path(&qts_data->dev->kobj, GFP_KERNEL);

	len = scnprintf(buf, PAGE_SIZE, "%s", path ? path : "");
	kfree(path);
	return len;
}
static struct kobj_attribute trusted_touch_enable_attr =
	__ATTR(trusted_touch_enable, 0664, trusted_touch_enable_show, trusted_touch_enable_store);

static struct kobj_attribute trusted_touch_event_attr =
	__ATTR(trusted_touch_event, 0664, trusted_touch_event_show, trusted_touch_event_store);

static struct kobj_attribute trusted_touch_type_attr =
	__ATTR(trusted_touch_type, 0444, trusted_touch_type_show, NULL);

static struct kobj_attribute trusted_touch_device_path_attr =
	__ATTR(trusted_touch_device_path, 0444, trusted_touch_device_path_show, NULL);

static struct attribute *qts_attributes[] = {
	&trusted_touch_enable_attr.attr,
	&trusted_touch_event_attr.attr,
	&trusted_touch_type_attr.attr,
	&trusted_touch_device_path_attr.attr,
	NULL,
};

static struct attribute_group qts_attribute_group = {
	.attrs = qts_attributes,
};
static int qts_create_sysfs(struct qts_data *qts_data)
{
	int ret = 0;
	struct kobject *qts_kobj;
	struct kobject *client_kobj;

	qts_kobj = &qts_data_entries->qts_kset->kobj;

	if (qts_data->client_type == QTS_CLIENT_PRIMARY_TOUCH) {
		client_kobj = kobject_create_and_add("primary", qts_kobj);
		if (!client_kobj) {
			pr_err("primary kobject_create_and_add failed\n");
			return -ENOMEM;
		}

		ret = sysfs_create_group(client_kobj, &qts_attribute_group);
		if (ret) {
			pr_err("[EX]: sysfs_create_group() failed!!\n");
			sysfs_remove_group(client_kobj, &qts_attribute_group);
			return -ENOMEM;
		}
	} else if (qts_data->client_type == QTS_CLIENT_SECONDARY_TOUCH) {
		client_kobj = kobject_create_and_add("secondary", qts_kobj);
		if (!client_kobj) {
			pr_err("secondary kobject_create_and_add failed\n");
			return -ENOMEM;
		}

		ret = sysfs_create_group(client_kobj, &qts_attribute_group);
		if (ret) {
			pr_err("[EX]: sysfs_create_group() failed!!\n");
			sysfs_remove_group(client_kobj, &qts_attribute_group);
			return -ENOMEM;
		}
	}

	pr_debug("sysfs_create_group() succeeded\n");
	return ret;
}
static int qts_ts_check_dt(struct device_node *np)
{
	int i;
	int count;
	struct device_node *node;
	struct drm_panel *panel;

	count = of_count_phandle_with_args(np, "panel", NULL);
	if (count <= 0)
		return 0;

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(np, "panel", i);
		panel = of_drm_find_panel(node);
		of_node_put(node);
		if (!IS_ERR(panel)) {
			active_panel = panel;
			return 0;
		}
	}

	return PTR_ERR(panel);
}

static void qts_power_source_init(struct qts_data *qts_data)
{
	qts_data->vdd = regulator_get(qts_data->dev, "vdd");
	if (IS_ERR_OR_NULL(qts_data->vdd))
		pr_debug("get vdd regulator failed\n");

	qts_data->avdd = regulator_get(qts_data->dev, "avdd");
	if (IS_ERR_OR_NULL(qts_data->avdd))
		pr_debug("get avdd regulator failed\n");
}
#ifndef CONFIG_ARCH_QTI_VM
static int qts_enable_reg(struct qts_data *qts_data, bool enable)
{
	int ret = 0;

	if (IS_ERR_OR_NULL(qts_data->vdd)) {
		pr_err("vdd is invalid\n");
		return ret;
	}

	if (enable) {
		ret = regulator_enable(qts_data->vdd);
		if (ret) {
			pr_err("enable vdd regulator failed, ret=%d\n", ret);
			goto error;
		}

		if (!IS_ERR_OR_NULL(qts_data->avdd)) {
			ret = regulator_enable(qts_data->avdd);
			if (ret) {
				pr_err("enable avdd regulator failed, ret=%d\n", ret);
				goto error_avdd_en;
			}
		}
	} else {
		ret = regulator_disable(qts_data->vdd);
		if (ret)
			pr_err("disable vdd regulator failed, ret=%d\n", ret);

		if (!IS_ERR_OR_NULL(qts_data->avdd)) {
			ret = regulator_disable(qts_data->avdd);
			if (ret)
				pr_err("disable avdd regulator failed, ret=%d\n", ret);
		}
	}

	pr_debug("enable %d completed\n", enable);
	return ret;

error_avdd_en:
	(void)regulator_disable(qts_data->vdd);
error:
	return ret;
}
#endif
static void qts_ts_suspend(struct qts_data *qts_data)
{
	int rc = 0;

	if (qts_data->suspended) {
		pr_warn("already in suspend state\n");
		return;
	}

	if (qts_data->tui_supported) {
		if (atomic_read(&qts_data->trusted_touch_transition)
				|| atomic_read(&qts_data->trusted_touch_enabled))
			wait_for_completion_interruptible(&qts_data->trusted_touch_powerdown);
	}

	mutex_lock(&qts_data->transition_lock);
	rc = qts_data->vendor_ops.suspend(qts_data->vendor_data);
	if (rc)
		pr_err("suspend failed, rc = %d\n", rc);
	qts_data->suspended = true;
	mutex_unlock(&qts_data->transition_lock);
}

static void qts_ts_resume(struct qts_data *qts_data)
{
	int rc = 0;

	if (!qts_data->suspended) {
		pr_warn("Already in awake state\n");
		return;
	}

	if (qts_data->tui_supported)
		if (atomic_read(&qts_data->trusted_touch_transition))
			wait_for_completion_interruptible(&qts_data->trusted_touch_powerdown);

	mutex_lock(&qts_data->transition_lock);
	rc = qts_data->vendor_ops.resume(qts_data->vendor_data);
	if (rc)
		pr_err("resume failed, rc = %d\n", rc);
	qts_data->suspended = false;
	mutex_unlock(&qts_data->transition_lock);
}

static void qts_resume_work(struct work_struct *work)
{
	struct qts_data *qts_data = container_of(work, struct qts_data,
			resume_work);

	qts_ts_resume(qts_data);
}

static void qts_suspend_work(struct work_struct *work)
{
	struct qts_data *qts_data = container_of(work, struct qts_data,
			suspend_work);

	qts_ts_suspend(qts_data);
}
static void qts_panel_notifier_callback(enum panel_event_notifier_tag tag,
		struct panel_event_notification *notification, void *client_data)
{
	struct qts_data *qts_data = client_data;

	if (!notification) {
		pr_err("Invalid notification\n");
		return;
	}

	pr_debug("Notification type:%d, early_trigger:%d\n",
			notification->notif_type, notification->notif_data.early_trigger);

	switch (notification->notif_type) {
	case DRM_PANEL_EVENT_UNBLANK:
		if (notification->notif_data.early_trigger) {
			pr_debug("resume notification pre commit\n");
		} else {
			if (qts_data->schedule_resume)
				queue_work(qts_data->ts_workqueue, &qts_data->resume_work);
			else
				qts_ts_resume(qts_data);
		}
		break;
	case DRM_PANEL_EVENT_BLANK:
		if (notification->notif_data.early_trigger) {
			if (qts_data->schedule_resume)
				cancel_work_sync(&qts_data->resume_work);
			if (qts_data->schedule_suspend)
				queue_work(qts_data->ts_workqueue, &qts_data->suspend_work);
			else
				qts_ts_suspend(qts_data);
		} else {
			pr_debug("suspend notification post commit\n");
		}
		break;
	case DRM_PANEL_EVENT_BLANK_LP:
		pr_debug("received lp event\n");
		break;
	case DRM_PANEL_EVENT_FPS_CHANGE:
		pr_debug("Received fps change old fps:%d new fps:%d\n",
				notification->notif_data.old_fps,
				notification->notif_data.new_fps);
		break;
	default:
		pr_debug("notification serviced :%d\n",
				notification->notif_type);
		break;
	}
}
static void qts_ts_register_for_panel_events(struct qts_data *qts_data)
{
	void *cookie = NULL;

	if (qts_data->client_type != QTS_CLIENT_PRIMARY_TOUCH) {
		pr_err("Invalid touch type\n");
		return;
	}

	cookie = panel_event_notifier_register(PANEL_EVENT_NOTIFICATION_PRIMARY,
			PANEL_EVENT_NOTIFIER_CLIENT_PRIMARY_TOUCH, qts_data->panel,
			&qts_panel_notifier_callback, qts_data);
	if (!cookie) {
		pr_err("Failed to register for panel events\n");
		return;
	}

	pr_debug("registered for panel notifications panel: %p\n", qts_data->panel);
	qts_data->notifier_cookie = cookie;
}
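/*
 * Entry point for vendor touch drivers: registers the client with QTS,
 * initializes trusted touch (if enabled in device tree), creates the sysfs
 * nodes and hooks panel event notifications for suspend/resume handling.
 */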
int qts_client_register(struct qts_vendor_data qts_vendor_data)
{
	struct qts_data *qts_data;
	struct device_node *dp;
	int rc = 0;

	if (!qts_data_entries) {
		pr_debug("QTS client register\n");
		qts_data_entries = kzalloc(sizeof(*qts_data_entries), GFP_KERNEL);
		if (!qts_data_entries) {
			pr_err("mem allocation failed\n");
			return -EPROBE_DEFER;
		}
		mutex_init(&qts_data_entries->qts_data_entries_lock);
		qts_data_entries->qts_kset = kset_create_and_add("qts", NULL, kernel_kobj);
		if (!qts_data_entries->qts_kset) {
			pr_err("qts kset create failed\n");
			return -ENOMEM;
		}
	}

	mutex_lock(&qts_data_entries->qts_data_entries_lock);

	if (qts_vendor_data.bus_type == QTS_BUS_TYPE_I2C)
		dp = qts_vendor_data.client->dev.of_node;
	else
		dp = qts_vendor_data.spi->dev.of_node;

	rc = qts_ts_check_dt(dp);
	if (rc) {
		pr_debug("qts_ts_check_dt failed, rc = %d\n", rc);
		goto qts_register_end;
	}

	pr_debug("QTS client register starts\n");
	qts_data = &qts_data_entries->info[qts_vendor_data.client_type];

	qts_data->client = qts_vendor_data.client;
	qts_data->spi = qts_vendor_data.spi;
	if (qts_vendor_data.bus_type == QTS_BUS_TYPE_I2C)
		qts_data->dev = &qts_data->client->dev;
	else
		qts_data->dev = &qts_data->spi->dev;

	qts_data->bus_type = qts_vendor_data.bus_type;
	qts_data->client_type = qts_vendor_data.client_type;
	qts_data->dp = dp;
	qts_data->vendor_data = qts_vendor_data.vendor_data;
	qts_data->panel = active_panel;
	qts_data->vendor_ops = qts_vendor_data.qts_vendor_ops;
	qts_data->schedule_suspend = qts_vendor_data.schedule_suspend;
	qts_data->schedule_resume = qts_vendor_data.schedule_resume;

	qts_trusted_touch_init(qts_data);

	mutex_init(&(qts_data->qts_clk_io_ctrl_mutex));
	if (qts_data->tui_supported)
		qts_create_sysfs(qts_data);

	mutex_init(&qts_data->transition_lock);

#ifdef CONFIG_ARCH_QTI_VM
	atomic_set(&qts_data->delayed_tvm_probe_pending, 1);
	goto qts_register_end;
#else
	atomic_set(&qts_data->delayed_pvm_probe_pending, 1);
#endif

	qts_power_source_init(qts_data);

	qts_data->ts_workqueue = create_singlethread_workqueue("qts_wq");
	if (!qts_data->ts_workqueue)
		pr_err("create qts workqueue fail\n");

	if (qts_data->ts_workqueue && qts_data->schedule_resume)
		INIT_WORK(&qts_data->resume_work, qts_resume_work);
	if (qts_data->ts_workqueue && qts_data->schedule_suspend)
		INIT_WORK(&qts_data->suspend_work, qts_suspend_work);

	qts_ts_register_for_panel_events(qts_data);

qts_register_end:
	pr_debug("client register end\n");
	mutex_unlock(&qts_data_entries->qts_data_entries_lock);
	return rc;
}
EXPORT_SYMBOL(qts_client_register);