// input_booster.c
#define ITAG " [Input Booster] "
#include <linux/input/input_booster.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/init.h>

#if IS_ENABLED(CONFIG_SEC_INPUT_BOOSTER_QC) || \
	IS_ENABLED(CONFIG_SEC_INPUT_BOOSTER_SLSI) || \
	IS_ENABLED(CONFIG_SEC_INPUT_BOOSTER_MTK)
spinlock_t write_ib_lock;
spinlock_t write_qos_lock;
struct mutex trigger_ib_lock;
struct mutex mem_lock;
struct mutex rel_ib_lock;
struct mutex sip_rel_lock;
struct workqueue_struct *ib_handle_highwq;

int total_ib_cnt;
int ib_init_succeed;
int u_ib_mode;
static int num_of_mode;
int level_value = IB_MAX;
unsigned int debug_flag;
unsigned int enable_event_booster = INIT_ZERO;

// Input Booster Init Variables
int *release_val;
int *cpu_cluster_policy;
int *allowed_resources;
int max_resource_count;
int device_count;
int max_cluster_count;
int allowed_res_count;
struct t_ib_device_tree *ib_device_trees;
struct t_ib_trigger *ib_trigger;
struct list_head *ib_list;
struct list_head *qos_list;

// @evdev_mt_slot : saves the number of the touch slot currently being input.
int evdev_mt_slot;
// @evdev_mt_event[] : saves the event count for each booster type.
int evdev_mt_event[MAX_DEVICE_TYPE_NUM];
int trigger_cnt;
int send_ev_enable;
struct t_ib_info *find_release_ib(int dev_type, int key_id);
struct t_ib_info *create_ib_instance(struct t_ib_trigger *p_IbTrigger, int uniqId);
bool is_validate_uniqid(unsigned int uniq_id);
struct t_ib_target *find_update_target(int uniq_id, int res_id);
long get_qos_value(int res_id);
void remove_ib_instance(struct t_ib_info *ib);
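
/*
 * trigger_input_booster() - entry point for every booster trigger, run from
 * the per-trigger work item. On BOOSTER_ON it rejects duplicate keys, picks a
 * collision-free uniq_id, creates the ib instance, pre-registers zero-valued
 * targets in qos_list for every allowed resource with a head value, and
 * queues the head-stage work. On release it sets the instance's rel_flag and,
 * if the head stage already finished, (re)arms the tail timeout work.
 */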
void trigger_input_booster(struct work_struct *work)
{
	unsigned int uniq_id = 0;
	int res_type = -1;
	struct t_ib_info *ib;
	struct t_ib_trigger *p_IbTrigger = container_of(work, struct t_ib_trigger, ib_trigger_work);

	if (p_IbTrigger == NULL)
		return;

	mutex_lock(&trigger_ib_lock);

	// Input booster On/Off handling
	if (p_IbTrigger->event_type == BOOSTER_ON) {
		if (find_release_ib(p_IbTrigger->dev_type, p_IbTrigger->key_id) != NULL) {
			pr_err(ITAG" IB Trigger :: ib already exists. Key(%d)", p_IbTrigger->key_id);
			mutex_unlock(&trigger_ib_lock);
			return;
		}

		// Keep incrementing until the candidate uniq_id is not already in use.
		do {
			uniq_id = total_ib_cnt++;
			if (total_ib_cnt == MAX_IB_COUNT)
				total_ib_cnt = 0;
		} while (!is_validate_uniqid(uniq_id));

		// Create the ib instance with everything it needs.
		ib = create_ib_instance(p_IbTrigger, uniq_id);
		pr_info(ITAG" IB Trigger Press :: IB Uniq Id(%d)", uniq_id);
		if (ib == NULL) {
			mutex_unlock(&trigger_ib_lock);
			pr_err(ITAG" Creating ib object fail");
			return;
		}
		ib->press_flag = FLAG_ON;

		// When the ib instance is created, insert its resource info into the qos list with value 0.
		for (res_type = 0; res_type < allowed_res_count; res_type++) {
			if (allowed_resources[res_type] >= max_resource_count) {
				pr_err(ITAG" allow res num(%d) exceeds over max res count",
					allowed_resources[res_type]);
				continue;
			}
			if (ib != NULL &&
				ib->ib_dt->res[allowed_resources[res_type]].head_value != 0) {
				struct t_ib_target *tv;

				tv = kmalloc(sizeof(struct t_ib_target), GFP_KERNEL);
				if (tv == NULL)
					continue;
				tv->uniq_id = ib->uniq_id;
				tv->value = 0;

				spin_lock(&write_qos_lock);
				list_add_tail_rcu(&(tv->list), &qos_list[allowed_resources[res_type]]);
				spin_unlock(&write_qos_lock);
			}
		}
		queue_work(ib_handle_highwq, &(ib->ib_state_work[IB_HEAD]));
	} else {
		/* Find the ib instance in the list. If it does not exist, ignore
		 * this event; if it does, set the release flag and arm its
		 * release path.
		 */
		mutex_lock(&sip_rel_lock);
		ib = find_release_ib(p_IbTrigger->dev_type, p_IbTrigger->key_id);

		mutex_lock(&mem_lock);
		if (ib == NULL) {
			pr_err(ITAG" IB is null on release");
			mutex_unlock(&mem_lock);
			mutex_unlock(&sip_rel_lock);
			mutex_unlock(&trigger_ib_lock);
			return;
		}
		mutex_unlock(&mem_lock);

		mutex_lock(&ib->lock);
		pr_info(ITAG" IB Trigger Release :: Uniq ID(%d)", ib->uniq_id);
		ib->rel_flag = FLAG_ON;

		if (ib->ib_dt->tail_time == 0) {
			pr_booster(" IB tail time is 0");
			mutex_unlock(&ib->lock);
			mutex_unlock(&sip_rel_lock);
			mutex_unlock(&trigger_ib_lock);
			return;
		}

		// If the head stage has already finished, (re)arm the tail timeout work.
		if (ib->isHeadFinished) {
			if (!delayed_work_pending(&(ib->ib_timeout_work[IB_TAIL]))) {
				queue_delayed_work(ib_handle_highwq,
					&(ib->ib_timeout_work[IB_TAIL]),
					msecs_to_jiffies(ib->ib_dt->tail_time));
			} else {
				cancel_delayed_work(&(ib->ib_timeout_work[IB_TAIL]));
				queue_delayed_work(ib_handle_highwq,
					&(ib->ib_timeout_work[IB_TAIL]),
					msecs_to_jiffies(ib->ib_dt->tail_time));
			}
		}
		mutex_unlock(&ib->lock);
		mutex_unlock(&sip_rel_lock);
	}
	mutex_unlock(&trigger_ib_lock);
}
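
/*
 * create_ib_instance() - allocates and initializes one booster instance,
 * wires up its head/tail state and timeout work items, and publishes it on
 * the per-device RCU list so release events can find it.
 */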
struct t_ib_info *create_ib_instance(struct t_ib_trigger *p_IbTrigger, int uniqId)
{
	struct t_ib_info *ib = kmalloc(sizeof(struct t_ib_info), GFP_KERNEL);
	int dev_type = p_IbTrigger->dev_type;

	if (ib == NULL)
		return NULL;

	ib->key_id = p_IbTrigger->key_id;
	ib->uniq_id = uniqId;
	ib->press_flag = FLAG_OFF;
	ib->rel_flag = FLAG_OFF;
	ib->isHeadFinished = 0;
	ib->ib_dt = &ib_device_trees[dev_type];
	INIT_WORK(&ib->ib_state_work[IB_HEAD], press_state_func);
	INIT_DELAYED_WORK(&ib->ib_timeout_work[IB_HEAD], press_timeout_func);
	INIT_WORK(&ib->ib_state_work[IB_TAIL], release_state_func);
	INIT_DELAYED_WORK(&ib->ib_timeout_work[IB_TAIL], release_timeout_func);
	mutex_init(&ib->lock);

	spin_lock(&write_ib_lock);
	list_add_tail_rcu(&(ib->list), &ib_list[dev_type]);
	spin_unlock(&write_ib_lock);
	return ib;
}
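
/*
 * is_validate_uniqid() - returns true when no live ib instance on any device
 * list already owns @uniq_id; used to keep the wrapping counter in
 * trigger_input_booster() collision-free.
 */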
bool is_validate_uniqid(unsigned int uniq_id)
{
	int dev_type;
	int cnt = 0;
	struct t_ib_info *ib = NULL;

	rcu_read_lock();
	for (dev_type = 0; dev_type < device_count; dev_type++) {
		if (list_empty(&ib_list[dev_type])) {
			pr_booster("IB List(%d) Empty", dev_type);
			continue;
		}
		list_for_each_entry_rcu(ib, &ib_list[dev_type], list) {
			cnt++;
			if (ib != NULL && ib->uniq_id == uniq_id) {
				rcu_read_unlock();
				pr_booster("uniq id found :: IB Idx(%d) old(%d) new(%d)", cnt, ib->uniq_id, uniq_id);
				return false;
			}
		}
		cnt = 0;
	}
	rcu_read_unlock();
	return true;
}
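
/*
 * find_release_ib() - looks up the pressed (not-yet-released) instance for
 * @key_id on @dev_type's list; returns NULL when nothing is waiting to be
 * released.
 */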
struct t_ib_info *find_release_ib(int dev_type, int key_id)
{
	struct t_ib_info *ib = NULL;

	rcu_read_lock();
	if (list_empty(&ib_list[dev_type])) {
		rcu_read_unlock();
		pr_booster("Release IB(%d) Not Exist & List Empty", key_id);
		return NULL;
	}
	list_for_each_entry_rcu(ib, &ib_list[dev_type], list) {
		if (ib != NULL && ib->key_id == key_id && ib->rel_flag == FLAG_OFF) {
			rcu_read_unlock();
			pr_booster("Release IB(%d) Found", key_id);
			return ib;
		}
	}
	rcu_read_unlock();
	pr_booster("Release IB(%d) Not Exist", key_id);
	return NULL;
}
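
/*
 * press_state_func() - head stage. Writes each resource's head value into the
 * instance's pre-registered targets, pushes the aggregated qos values to the
 * vendor layer via ib_set_booster(), and schedules the head timeout.
 */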
void press_state_func(struct work_struct *work)
{
	struct t_ib_res_info res;
	struct t_ib_target *tv;
	long qos_values[MAX_RES_COUNT] = {0, };
	int res_type = 0;
	struct t_ib_info *target_ib = container_of(work, struct t_ib_info, ib_state_work[IB_HEAD]);

	pr_info(ITAG" Press State Func :::: Unique_Id(%d)", target_ib->uniq_id);

	// Walk the resource list and update each target's value to its head value.
	for (res_type = 0; res_type < allowed_res_count; res_type++) {
		res = target_ib->ib_dt->res[allowed_resources[res_type]];
		if (res.head_value == 0)
			continue;

		// Find the already-added target value instance and update it as the head.
		tv = find_update_target(target_ib->uniq_id, res.res_id);
		if (tv == NULL) {
			pr_err(ITAG"Press State Func :::: %d's tv(%d) is null",
				target_ib->uniq_id, res.res_id);
			continue;
		}
		tv->value = res.head_value;
		pr_booster("Press State Func :::: Uniq(%d)'s Update Res(%d) Head Val(%d)",
			tv->uniq_id, res.res_id, res.head_value);
		qos_values[res.res_id] = get_qos_value(res.res_id);
	}
	ib_set_booster(qos_values);

	pr_booster("Press State Func :::: Press Delay Time(%lu)",
		msecs_to_jiffies(target_ib->ib_dt->head_time));
	queue_delayed_work(ib_handle_highwq, &(target_ib->ib_timeout_work[IB_HEAD]),
		msecs_to_jiffies(target_ib->ib_dt->head_time));
}
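
/*
 * press_timeout_func() - runs when the head window expires. With a tail time
 * configured it hands off to the tail stage; otherwise it tears the instance
 * down: drops its qos targets, releases any resource whose list went empty,
 * reapplies the remaining qos values, and frees the instance.
 */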
void press_timeout_func(struct work_struct *work)
{
	int res_type;
	struct t_ib_res_info res;
	struct t_ib_target *tv;
	long qos_values[MAX_RES_COUNT] = {0, };
	long rel_flags[MAX_RES_COUNT] = {0, };
	struct t_ib_info *target_ib = container_of(work, struct t_ib_info, ib_timeout_work[IB_HEAD].work);

	if (!target_ib)
		return;

	pr_info(ITAG" Press Timeout Func :::: Unique_Id(%d) Tail_Time(%d)",
		target_ib->uniq_id, target_ib->ib_dt->tail_time);

	mutex_lock(&sip_rel_lock);
	if (target_ib->ib_dt->tail_time != 0) {
		mutex_lock(&target_ib->lock);
		queue_work(ib_handle_highwq, &(target_ib->ib_state_work[IB_TAIL]));
		mutex_unlock(&target_ib->lock);
	} else {
		// NO TAIL scenario: delete the ib instance and free all of its memory.
		for (res_type = 0; res_type < allowed_res_count; res_type++) {
			res = target_ib->ib_dt->res[allowed_resources[res_type]];
			tv = find_update_target(target_ib->uniq_id, res.res_id);
			if (tv == NULL) {
				pr_err(ITAG" Press Timeout Func :::: %d's TV No Exist(%d)",
					target_ib->uniq_id, res.res_id);
				continue;
			}

			spin_lock(&write_qos_lock);
			list_del_rcu(&(tv->list));
			spin_unlock(&write_qos_lock);
			synchronize_rcu();
			kfree(tv);
			tv = NULL;

			rcu_read_lock();
			if (!list_empty(&qos_list[res.res_id])) {
				rcu_read_unlock();
				qos_values[res.res_id] = get_qos_value(res.res_id);
				pr_booster("Press Timeout ::: Remove Val Cuz No Tail ::: Uniq(%d) Res(%d) Qos Val(%ld)",
					target_ib->uniq_id, res.res_id, qos_values[res.res_id]);
			} else {
				rcu_read_unlock();
				rel_flags[res.res_id] = 1;
				pr_booster("Press Timeout ::: Uniq(%d) Release Booster(%d) ::: No Tail and List Empty",
					target_ib->uniq_id, res.res_id);
			}
		}

		mutex_lock(&rel_ib_lock);
		ib_release_booster(rel_flags);
		ib_set_booster(qos_values);
		mutex_unlock(&rel_ib_lock);

		remove_ib_instance(target_ib);
	}
	mutex_unlock(&sip_rel_lock);
}
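
/*
 * release_state_func() - tail stage. Switches every active target from its
 * head value to its tail value and reapplies the qos set. Once the release
 * event has arrived (rel_flag on) the tail timeout is armed with tail_time;
 * until then a 60 s safety timeout guards against a press that never gets
 * released.
 */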
void release_state_func(struct work_struct *work)
{
	long qos_values[MAX_RES_COUNT] = {0, };
	int res_type = 0;
	struct t_ib_target *tv;
	struct t_ib_res_info res;
	struct t_ib_info *target_ib = container_of(work, struct t_ib_info, ib_state_work[IB_TAIL]);

	if (target_ib == NULL)
		return;

	mutex_lock(&target_ib->lock);
	target_ib->isHeadFinished = 1;
	pr_info(ITAG" Release State Func :::: Unique_Id(%d) Rel_Flag(%d)",
		target_ib->uniq_id, target_ib->rel_flag);

	for (res_type = 0; res_type < allowed_res_count; res_type++) {
		res = target_ib->ib_dt->res[allowed_resources[res_type]];
		if (res.tail_value == 0)
			continue;

		tv = find_update_target(target_ib->uniq_id, res.res_id);
		if (tv == NULL)
			continue;

		spin_lock(&write_qos_lock);
		tv->value = res.tail_value;
		spin_unlock(&write_qos_lock);

		qos_values[res.res_id] = get_qos_value(res.res_id);
		pr_booster("Release State Func :::: Uniq(%d)'s Update Tail Val (%ld), Qos_Val(%ld)",
			tv->uniq_id, tv->value, qos_values[res.res_id]);
	}
	ib_set_booster(qos_values);

	// If the release event has already been triggered, the tail timeout work starts right after this release state func.
	if (target_ib->rel_flag == FLAG_ON) {
		if (!delayed_work_pending(&(target_ib->ib_timeout_work[IB_TAIL]))) {
			queue_delayed_work(ib_handle_highwq,
				&(target_ib->ib_timeout_work[IB_TAIL]),
				msecs_to_jiffies(target_ib->ib_dt->tail_time));
		} else {
			pr_err(ITAG" Release State Func :: tail timeout already queued");
		}
	} else {
		// Release has not arrived yet: arm a long safety timeout instead.
		queue_delayed_work(ib_handle_highwq,
			&(target_ib->ib_timeout_work[IB_TAIL]),
			msecs_to_jiffies(60000));
	}
	mutex_unlock(&target_ib->lock);
}
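
/*
 * release_timeout_func() - final stage. Unlinks and frees every qos target
 * owned by the instance, releases any resource whose list went empty,
 * refreshes the remaining qos values, and destroys the instance.
 */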
void release_timeout_func(struct work_struct *work)
{
	long qos_values[MAX_RES_COUNT] = {0, };
	long rel_flags[MAX_RES_COUNT] = {0, };
	struct t_ib_target *tv;
	struct t_ib_res_info res;
	int res_type;
	struct t_ib_info *target_ib = container_of(work, struct t_ib_info, ib_timeout_work[IB_TAIL].work);

	if (!target_ib)
		return;

	pr_info(ITAG" Release Timeout Func :::: Unique_Id(%d)", target_ib->uniq_id);

	mutex_lock(&sip_rel_lock);
	for (res_type = 0; res_type < allowed_res_count; res_type++) {
		res = target_ib->ib_dt->res[allowed_resources[res_type]];
		tv = find_update_target(target_ib->uniq_id, res.res_id);
		if (tv == NULL) {
			pr_err(ITAG" Release Timeout Func :::: %d's TV No Exist(%d)",
				target_ib->uniq_id, res.res_id);
			continue;
		}
		pr_booster("Release Timeout Func :::: Delete Uniq(%d)'s TV Val (%ld)",
			tv->uniq_id, tv->value);

		spin_lock(&write_qos_lock);
		list_del_rcu(&(tv->list));
		spin_unlock(&write_qos_lock);
		synchronize_rcu();
		kfree(tv);
		tv = NULL;

		rcu_read_lock();
		if (!list_empty(&qos_list[res.res_id])) {
			rcu_read_unlock();
			qos_values[res.res_id] = get_qos_value(res.res_id);
			pr_booster("Release Timeout Func ::: Uniq(%d) Res(%d) Qos Val(%ld)",
				target_ib->uniq_id, res.res_id, qos_values[res.res_id]);
		} else {
			rcu_read_unlock();
			rel_flags[res.res_id] = 1;
			pr_booster("Release Timeout ::: Release Booster(%d's %d) ::: List Empty",
				target_ib->uniq_id, res.res_id);
		}
	}

	mutex_lock(&rel_ib_lock);
	ib_release_booster(rel_flags);
	ib_set_booster(qos_values);
	mutex_unlock(&rel_ib_lock);

	remove_ib_instance(target_ib);
	mutex_unlock(&sip_rel_lock);
}
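
/*
 * find_update_target() - RCU walk of qos_list[@res_id] for the target entry
 * belonging to @uniq_id. Note the pointer is returned after rcu_read_unlock(),
 * so callers are expected to keep the entry alive through the instance lock
 * and sip_rel_lock serialization used elsewhere in this file.
 */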
struct t_ib_target *find_update_target(int uniq_id, int res_id)
{
	struct t_ib_target *tv;

	rcu_read_lock();
	list_for_each_entry_rcu(tv, &qos_list[res_id], list) {
		if (tv->uniq_id == uniq_id) {
			rcu_read_unlock();
			return tv;
		}
	}
	rcu_read_unlock();
	return NULL;
}
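
/*
 * get_qos_value() - returns the maximum requested value among all live
 * targets on qos_list[@res_id], or 0 when the list is empty.
 */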
long get_qos_value(int res_id)
{
	// Find the tv instance with the max value in the qos_list for the passed res_id.
	struct t_ib_target *tv;
	long ret_val = 0;

	rcu_read_lock();
	if (list_empty(&qos_list[res_id])) {
		rcu_read_unlock();
		return 0;
	}
	list_for_each_entry_rcu(tv, &qos_list[res_id], list) {
		if (tv->value > ret_val)
			ret_val = tv->value;
	}
	rcu_read_unlock();
	return ret_val;
}
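
/*
 * remove_ib_instance() - unlinks @target_ib from its device list under
 * write_ib_lock, waits out an RCU grace period, and frees it under mem_lock,
 * which mirrors the NULL check on the release path in trigger_input_booster().
 */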
void remove_ib_instance(struct t_ib_info *target_ib)
{
	struct t_ib_info *ib = NULL;
	int ib_exist = 0;

	// Check whether the target instance still exists in the list.
	spin_lock(&write_ib_lock);
	list_for_each_entry_rcu(ib, &ib_list[target_ib->ib_dt->type], list) {
		if (ib != NULL && ib == target_ib) {
			ib_exist = 1;
			break;
		}
	}
	if (!ib_exist) {
		spin_unlock(&write_ib_lock);
		pr_err(ITAG" Del Ib Fail Id : %d", target_ib->uniq_id);
	} else {
		list_del_rcu(&(target_ib->list));
		spin_unlock(&write_ib_lock);
		synchronize_rcu();
		pr_info(ITAG" Del Ib Instance's Id : %d", target_ib->uniq_id);

		mutex_lock(&mem_lock);
		kfree(target_ib);
		target_ib = NULL;
		mutex_unlock(&mem_lock);
	}
}
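
/*
 * create_uniq_id() - packs (type, code, slot) into one key id: type occupies
 * the bits above TYPE_BITS + CODE_BITS, code sits above CODE_BITS, and slot
 * fills the low bits. A minimal sketch, assuming hypothetical widths
 * TYPE_BITS = 8 and CODE_BITS = 12 (the real values live in the header):
 *
 *   type = 3, code = 0x4A, slot = 2
 *   (3 << 20) | (0x4A << 12) | 2 == 0x34A002
 *
 * Distinct (type, code, slot) tuples map to distinct ids as long as code and
 * slot stay within their fields.
 */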
unsigned int create_uniq_id(int type, int code, int slot)
{
	// id1 | (id2 << num_bits_id1) | (id3 << (num_bits_id1 + num_bits_id2))
	pr_booster("Create Key Id -> type(%d), code(%d), slot(%d)", type, code, slot);
	return (type << (TYPE_BITS + CODE_BITS)) | (code << CODE_BITS) | slot;
}

void ib_auto_test(int type, int code, int val)
{
	send_ev_enable = 1;
}
//+++++++++++++++++++++++++++++++++++++++++++++++ STRUCT & VARIABLE FOR SYSFS +++++++++++++++++++++++++++++++++++++++++++++++//
SYSFS_CLASS(enable_event, (buf, "%u\n", enable_event), 1)
SYSFS_CLASS(debug_level, (buf, "%u\n", debug_level), 1)
SYSFS_CLASS(sendevent, (buf, "%d\n", sendevent), 3)
HEAD_TAIL_SYSFS_DEVICE(head)
HEAD_TAIL_SYSFS_DEVICE(tail)
LEVEL_SYSFS_DEVICE(level)

struct attribute *dvfs_attributes[] = {
	&dev_attr_head.attr,
	&dev_attr_tail.attr,
	&dev_attr_level.attr,
	NULL,
};

struct attribute_group dvfs_attr_group = {
	.attrs = dvfs_attributes,
};
void init_sysfs_device(struct class *sysfs_class, struct t_ib_device_tree *ib_dt)
{
	struct device *sysfs_dev;
	int ret = 0;

	sysfs_dev = device_create(sysfs_class, NULL, 0, ib_dt, "%s", ib_dt->label);
	if (IS_ERR(sysfs_dev)) {
		ret = PTR_ERR(sysfs_dev);
		pr_err(ITAG" Failed to create %s sysfs device[%d]\n", ib_dt->label, ret);
		return;
	}
	ret = sysfs_create_group(&sysfs_dev->kobj, &dvfs_attr_group);
	if (ret) {
		pr_err(ITAG" Failed to create %s sysfs group\n", ib_dt->label);
		return;
	}
}
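
/*
 * parse_dtsi_str() - reads the @target_node property as one comma-separated
 * string and parses each token into @target_arr (as ints when @isIntType is
 * set, as string data otherwise). Returns the number of parsed tokens, or -1
 * on a missing property or a token that fails to parse.
 */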
int parse_dtsi_str(struct device_node *np, const char *target_node, void *target_arr, int isIntType)
{
	char prop_str[100];
	size_t prop_size = 0;
	char *prop_pointer = NULL;
	const char *token = NULL;
	int iter = 0;
	int *int_target_arr_ptr;
	char *str_target_arr_ptr;
	int copy_result;
	const char *full_str = of_get_property(np, target_node, NULL);

	if (full_str == NULL)
		return -1;

	if (isIntType)
		int_target_arr_ptr = (int *)target_arr;
	else
		str_target_arr_ptr = (char *)target_arr;

	prop_size = strlcpy(prop_str, full_str, sizeof(prop_str));
	prop_pointer = prop_str;
	token = strsep(&prop_pointer, ",");

	while (token != NULL) {
		pr_booster("%s %d's Type Value(%s)", target_node, iter, token);

		// Parsed values are inserted into the target array one token at a time.
		if (isIntType) {
			copy_result = sscanf(token, "%d", &int_target_arr_ptr[iter]);
			if (!copy_result) {
				pr_err(ITAG"DTSI string value parsing fail");
				return -1;
			}
			pr_booster("Target_arr[%d] : %d", iter, int_target_arr_ptr[iter]);
		} else {
			copy_result = sscanf(token, "%s", &str_target_arr_ptr[iter]);
			if (!copy_result) {
				pr_err(ITAG"DTSI string value parsing fail");
				return -1;
			}
		}
		token = strsep(&prop_pointer, ",");
		iter++;
	}
	return iter;
}
int is_ib_init_succeed(void)
{
	return (ib_trigger != NULL && ib_device_trees != NULL &&
		ib_list != NULL && qos_list != NULL) ? 1 : 0;
}

void input_booster_exit(void)
{
	kfree(ib_trigger);
	kfree(ib_device_trees);
	kfree(ib_list);
	kfree(qos_list);
	kfree(cpu_cluster_policy);
	kfree(allowed_resources);
	kfree(release_val);
	input_booster_exit_vendor();
}
// ********** Init Booster ********** //
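/*
 * input_booster_init() - parses the "input_booster" DT node (global limits,
 * cluster policy, allowed resources, release values, and per-device
 * head/tail times plus per-resource head/tail values), allocates the trigger
 * pool and the ib/qos lists, then brings up the vendor backend, the unbound
 * high-priority workqueue, and the sysfs nodes.
 */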
void input_booster_init(void)
{
	// ********** Load frequency data from DTSI **********
	struct device_node *np;
	struct device_node *cnp;
	const char *prop;
	int i;
	int ib_dt_size = sizeof(struct t_ib_device_tree);
	int ib_res_size = sizeof(struct t_ib_res_info);
	int list_head_size = sizeof(struct list_head);
	int ndevice_in_dt = 0;
	int res_cnt;
	int result;

	total_ib_cnt = 0;
	ib_init_succeed = 0;
	debug_flag = 0;
	enable_event_booster = INIT_ZERO;
	max_resource_count = 0;
	allowed_res_count = 0;
	device_count = 0;
	evdev_mt_slot = 0;
	trigger_cnt = 0;
	send_ev_enable = 0;

	spin_lock_init(&write_ib_lock);
	spin_lock_init(&write_qos_lock);
	mutex_init(&trigger_ib_lock);
	mutex_init(&sip_rel_lock);
	mutex_init(&rel_ib_lock);
	mutex_init(&mem_lock);

	// Input booster trigger struct init
	ib_trigger = kzalloc(sizeof(struct t_ib_trigger) * MAX_IB_COUNT, GFP_KERNEL);
	if (ib_trigger == NULL) {
		pr_err(ITAG" ib_trigger mem alloc fail");
		goto out;
	}
	for (i = 0; i < MAX_IB_COUNT; i++)
		INIT_WORK(&(ib_trigger[i].ib_trigger_work), trigger_input_booster);

	np = of_find_compatible_node(NULL, NULL, "input_booster");
	if (np == NULL)
		goto out;

	// Getting the count of devices.
	ndevice_in_dt = of_get_child_count(np);
	pr_info(ITAG" %s ndevice_in_dt : %d\n", __func__, ndevice_in_dt);

	ib_device_trees = kzalloc(ib_dt_size * ndevice_in_dt, GFP_KERNEL);
	if (ib_device_trees == NULL) {
		pr_err(ITAG" dt_infor mem alloc fail");
		goto out;
	}

	// ib list mem alloc
	ib_list = kzalloc(list_head_size * ndevice_in_dt, GFP_KERNEL);
	if (ib_list == NULL) {
		pr_err(ITAG" ib list mem alloc fail");
		goto out;
	}

	// Get needed information from dtsi; guard against missing properties.
	prop = of_get_property(np, "max_resource_count", NULL);
	result = (prop != NULL) ? sscanf(prop, "%d", &max_resource_count) : 0;
	if (!result) {
		pr_err(ITAG"max_resource_count value parsing fail");
		goto out;
	}

	prop = of_get_property(np, "max_cluster_count", NULL);
	result = (prop != NULL) ? sscanf(prop, "%d", &max_cluster_count) : 0;
	if (!result) {
		pr_err(ITAG"max_cluster_count value parsing fail");
		goto out;
	}
	pr_info(ITAG" resource size : %d, cluster count : %d",
		max_resource_count, max_cluster_count);

	// qos list mem alloc
	qos_list = kzalloc(list_head_size * max_resource_count, GFP_KERNEL);
	if (qos_list == NULL) {
		pr_err(ITAG" qos list mem alloc fail");
		goto out;
	}
	for (res_cnt = 0; res_cnt < max_resource_count; res_cnt++)
		INIT_LIST_HEAD(&qos_list[res_cnt]);

	// Init cpu cluster values; missing entries default to -1.
	cpu_cluster_policy = kzalloc(sizeof(int) * max_cluster_count, GFP_KERNEL);
	if (cpu_cluster_policy == NULL) {
		pr_err(ITAG" cpu_cluster_policy mem alloc fail");
		goto out;
	}
	result = parse_dtsi_str(np, "cpu_cluster_policy", cpu_cluster_policy, 1);
	pr_info(ITAG" Init:: Total Cpu Cluster Count : %d", result);
	if (result < 0)
		goto out;
	if (result < max_cluster_count) {
		for (i = result; i < max_cluster_count; i++)
			cpu_cluster_policy[i] = -1;
	}

	// Allowed resources
	allowed_resources = kzalloc(sizeof(int) * max_resource_count, GFP_KERNEL);
	if (allowed_resources == NULL) {
		pr_err(ITAG" allowed_resources mem alloc fail");
		goto out;
	}
	result = parse_dtsi_str(np, "allowed_resources", allowed_resources, 1);
	pr_info(ITAG" Init:: Total Allow Resource Count: %d", result);
	allowed_res_count = result;
	if (result < 0)
		goto out;
	for (i = 0; i < result; i++) {
		if (allowed_resources[i] >= max_resource_count) {
			pr_err(ITAG" allow res index(%d) exceeds over max res count",
				allowed_resources[i]);
			goto out;
		}
	}
	if (result > max_resource_count) {
		pr_err(ITAG" allow resources exceed over max resource count");
		goto out;
	}

	// Init resource release values
	release_val = kzalloc(sizeof(int) * max_resource_count, GFP_KERNEL);
	if (release_val == NULL) {
		pr_err(ITAG" release_val mem alloc fail");
		goto out;
	}
	result = parse_dtsi_str(np, "ib_release_values", release_val, 1);
	pr_info(ITAG" Init:: Total Release Value Count: %d", result);
	if (result < 0)
		goto out;
	if (result > max_resource_count) {
		pr_err(ITAG" release value parse fail :: exceeds max resource count");
		goto out;
	}

	for_each_child_of_node(np, cnp) {
		/************************************************/
		// Fill all needed data into the res_info instances of this dt instance.
		struct t_ib_device_tree *ib_dt = (ib_device_trees + device_count);
		struct device_node *child_resource_node;
		struct device_node *resource_node = of_find_compatible_node(cnp, NULL, "resource");
		int resource_node_index = 0;
		int res_type = 0;

		ib_dt->res = kzalloc(ib_res_size * max_resource_count, GFP_KERNEL);
		for (i = 0; i < max_resource_count; ++i) {
			ib_dt->res[i].res_id = -1;
			ib_dt->res[i].label = 0;
			ib_dt->res[i].head_value = 0;
			ib_dt->res[i].tail_value = 0;
		}

		for_each_child_of_node(resource_node, child_resource_node) {
			// allowed_resources[resource_node_index] is the same as the resource's ID.
			int inputbooster_size = 0;
			const u32 *is_exist_inputbooster_size =
				of_get_property(child_resource_node, "resource,value", &inputbooster_size);

			ib_dt->res[allowed_resources[resource_node_index]].res_id =
				allowed_resources[resource_node_index];
			ib_dt->res[allowed_resources[resource_node_index]].label =
				of_get_property(child_resource_node, "resource,label", NULL);

			if (is_exist_inputbooster_size && inputbooster_size)
				inputbooster_size = inputbooster_size / sizeof(u32);
			if (inputbooster_size != 2) {
				pr_err(ITAG" inputbooster size must be 2!");
				return; // error
			}

			for (res_type = 0; res_type < inputbooster_size; ++res_type) {
				if (res_type == IB_HEAD) {
					of_property_read_u32_index(child_resource_node, "resource,value",
						res_type, &ib_dt->res[allowed_resources[resource_node_index]].head_value);
				} else if (res_type == IB_TAIL) {
					of_property_read_u32_index(child_resource_node, "resource,value",
						res_type, &ib_dt->res[allowed_resources[resource_node_index]].tail_value);
				}
			}
			resource_node_index++;
		}

		ib_dt->label = of_get_property(cnp, "input_booster,label", NULL);
		pr_info(ITAG" %s ib_dt->label : %s\n", __func__, ib_dt->label);

		if (of_property_read_u32(cnp, "input_booster,type", &ib_dt->type)) {
			pr_err(ITAG" Failed to get type property\n");
			break;
		}
		if (of_property_read_u32(cnp, "input_booster,head_time", &ib_dt->head_time)) {
			pr_err(ITAG" Fail Get Head Time\n");
			break;
		}
		if (of_property_read_u32(cnp, "input_booster,tail_time", &ib_dt->tail_time)) {
			pr_err(ITAG" Fail Get Tail Time\n");
			break;
		}

		// Init every device type's ib list.
		INIT_LIST_HEAD(&ib_list[device_count]);
		device_count++;
	}

	ib_init_succeed = is_ib_init_succeed();
	pr_info(ITAG" Total Input Device Count(%d), IsSuccess(%d)", device_count, ib_init_succeed);
	ib_init_succeed = input_booster_init_vendor();
	if (ib_init_succeed)
		ib_handle_highwq = alloc_workqueue("ib_unbound_high_wq",
						   WQ_UNBOUND | WQ_HIGHPRI,
						   MAX_IB_COUNT);

out:
	// ********** Initialize sysfs **********
	{
		struct class *sysfs_class;
		int ret;
		int ib_type;

		sysfs_class = class_create(THIS_MODULE, "input_booster");
		if (IS_ERR(sysfs_class)) {
			pr_err(ITAG" Failed to create class\n");
			return;
		}
		if (ib_init_succeed) {
			INIT_SYSFS_CLASS(enable_event)
			INIT_SYSFS_CLASS(debug_level)
			INIT_SYSFS_CLASS(sendevent)
			for (ib_type = 0; ib_type < ndevice_in_dt; ib_type++)
				init_sysfs_device(sysfs_class, &ib_device_trees[ib_type]);
		}
	}
}
#endif //CONFIG_SEC_INPUT_BOOSTER_QC || CONFIG_SEC_INPUT_BOOSTER_SLSI || CONFIG_SEC_INPUT_BOOSTER_MTK
MODULE_LICENSE("GPL");