ipa_pm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include "ipa_pm.h"
#include "ipa_i.h"

#define IPA_PM_DRV_NAME "ipa_pm"

#define IPA_PM_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
#define IPA_PM_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
#define IPA_PM_ERR(fmt, args...) \
	do { \
		pr_err(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_PM_DBG_STATE(hdl, name, state) \
	IPA_PM_DBG_LOW("Client[%d] %s: %s\n", hdl, name, \
		client_state_to_str[state])

#if IPA_PM_MAX_CLIENTS > 32
#error IPA_PM_MAX_CLIENTS is greater than 32; all bitmask types must be widened
#endif
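
/*
 * Note: client handles index 32-bit masks (active_client_bitmask and
 * ipa_pm_exception_list.bitmask are u32), which is why the build breaks
 * above if IPA_PM_MAX_CLIENTS ever exceeds 32.
 */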
/*
 * struct ipa_pm_exception_list - holds information about an exception
 * @clients: comma-separated string of the client names in the exception
 * @pending: number of clients in the exception that have not yet been activated
 * @bitmask: bitmask of the clients in the exception based on handle
 * @threshold: the threshold values for the exception
 */
struct ipa_pm_exception_list {
	char clients[IPA_PM_MAX_EX_CL];
	int pending;
	u32 bitmask;
	int threshold[IPA_PM_THRESHOLD_MAX];
};

/*
 * struct clk_scaling_db - holds information about thresholds and exceptions
 * @lock: lock for the bitmasks and thresholds
 * @exception_list: array of the exceptions
 * @work: work for the clock scaling algorithm
 * @active_client_bitmask: the bits represent handles in the clients array that
 *	contain a non-null client
 * @threshold_size: size of the throughput threshold
 * @exception_size: size of the exception list
 * @cur_vote: idx of the current threshold
 * @default_threshold: the thresholds used if no exception passes
 * @current_threshold: the current threshold of the clock plan
 */
struct clk_scaling_db {
	spinlock_t lock;
	struct ipa_pm_exception_list exception_list[IPA_PM_EXCEPTION_MAX];
	struct work_struct work;
	u32 active_client_bitmask;
	int threshold_size;
	int exception_size;
	int cur_vote;
	int default_threshold[IPA_PM_THRESHOLD_MAX];
	int *current_threshold;
};

/*
 * ipa_pm state names
 *
 * Timer-free states:
 * @IPA_PM_DEACTIVATED: client's starting state when registered
 * @IPA_PM_DEACTIVATE_IN_PROGRESS: deactivate was called while the client was
 *	still activating
 * @IPA_PM_ACTIVATE_IN_PROGRESS: client is being activated by the work queue
 * @IPA_PM_ACTIVATED: client is activated without any timers
 *
 * Timer-set states:
 * @IPA_PM_ACTIVATED_PENDING_DEACTIVATION: moves to deactivated once the timer
 *	expires
 * @IPA_PM_ACTIVATED_TIMER_SET: client was activated while the timer was set,
 *	so when the timer expires the client remains activated
 * @IPA_PM_ACTIVATED_PENDING_RESCHEDULE: state signifying an extended timer,
 *	entered when a client is deferred-deactivated while a timer is still
 *	active
 */
enum ipa_pm_state {
	IPA_PM_DEACTIVATED,
	IPA_PM_DEACTIVATE_IN_PROGRESS,
	IPA_PM_ACTIVATE_IN_PROGRESS,
	IPA_PM_ACTIVATED,
	IPA_PM_ACTIVATED_PENDING_DEACTIVATION,
	IPA_PM_ACTIVATED_TIMER_SET,
	IPA_PM_ACTIVATED_PENDING_RESCHEDULE,
	IPA_PM_STATE_MAX
};

#define IPA_PM_STATE_ACTIVE(state) \
	(state == IPA_PM_ACTIVATED ||\
		state == IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||\
		state == IPA_PM_ACTIVATED_TIMER_SET ||\
		state == IPA_PM_ACTIVATED_PENDING_RESCHEDULE)

#define IPA_PM_STATE_IN_PROGRESS(state) \
	(state == IPA_PM_ACTIVATE_IN_PROGRESS \
		|| state == IPA_PM_DEACTIVATE_IN_PROGRESS)
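
/*
 * Note: both state helper macros expand their argument multiple times, so
 * callers pass a plain state variable rather than an expression with side
 * effects.
 */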
/*
 * struct ipa_pm_client - holds information about a specific IPA client
 * @name: string name of the client
 * @callback: pointer to the client's callback function
 * @callback_params: pointer to the client's callback parameters
 * @state: activation state of the client
 * @skip_clk_vote: 0 if the client votes for clock when activated, 1 if no vote
 * @group: the ipa_pm_group the client belongs to
 * @hdl: handle of the client
 * @throughput: the throughput of the client for clock scaling
 * @state_lock: spinlock to lock the pm_states
 * @activate_work: work for activate (blocking case)
 * @deactivate_work: delayed work for the deferred_deactivate function
 * @complete: generic wait-for-completion handler
 * @wlock: wake source to prevent AP suspend
 */
struct ipa_pm_client {
	char name[IPA_PM_MAX_EX_CL];
	void (*callback)(void *user_data, enum ipa_pm_cb_event);
	void *callback_params;
	enum ipa_pm_state state;
	bool skip_clk_vote;
	int group;
	int hdl;
	int throughput;
	spinlock_t state_lock;
	struct work_struct activate_work;
	struct delayed_work deactivate_work;
	struct completion complete;
	struct wakeup_source wlock;
};

/*
 * struct ipa_pm_ctx - global ctx that holds the client arrays and tput info
 * @clients: array of the clients with the handle as the index
 * @clients_by_pipe: array of the clients with the endpoint as the index
 * @wq: work queue for deferred deactivate, activate, and clk_scaling work
 * @clk_scaling: clock scaling database
 * @client_mutex: global mutex to lock the client arrays
 * @aggregated_tput: aggregated tput value of all valid activated clients
 * @group_tput: combined throughput for the groups
 */
struct ipa_pm_ctx {
	struct ipa_pm_client *clients[IPA_PM_MAX_CLIENTS];
	struct ipa_pm_client *clients_by_pipe[IPA3_MAX_NUM_PIPES];
	struct workqueue_struct *wq;
	struct clk_scaling_db clk_scaling;
	struct mutex client_mutex;
	int aggregated_tput;
	int group_tput[IPA_PM_GROUP_MAX];
};

static struct ipa_pm_ctx *ipa_pm_ctx;

static const char *client_state_to_str[IPA_PM_STATE_MAX] = {
	__stringify(IPA_PM_DEACTIVATED),
	__stringify(IPA_PM_DEACTIVATE_IN_PROGRESS),
	__stringify(IPA_PM_ACTIVATE_IN_PROGRESS),
	__stringify(IPA_PM_ACTIVATED),
	__stringify(IPA_PM_ACTIVATED_PENDING_DEACTIVATION),
	__stringify(IPA_PM_ACTIVATED_TIMER_SET),
	__stringify(IPA_PM_ACTIVATED_PENDING_RESCHEDULE),
};

static const char *ipa_pm_group_to_str[IPA_PM_GROUP_MAX] = {
	__stringify(IPA_PM_GROUP_DEFAULT),
	__stringify(IPA_PM_GROUP_APPS),
	__stringify(IPA_PM_GROUP_MODEM),
};

/**
 * pop_max_from_array() - pop the max and move the last element to where the
 *	max was popped
 * @arr: array to be searched for the max
 * @n: size of the array
 *
 * Returns: max value of the array
 */
static int pop_max_from_array(int *arr, int *n)
{
	int i;
	int max, max_idx;

	max_idx = *n - 1;
	max = 0;

	if (*n == 0)
		return 0;

	for (i = 0; i < *n; i++) {
		if (arr[i] > max) {
			max = arr[i];
			max_idx = i;
		}
	}
	(*n)--;
	arr[max_idx] = arr[*n];

	return max;
}
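
/*
 * pop_max_from_array() example: arr = {3, 7, 5}, *n = 3 -> returns 7 and
 * leaves arr = {3, 5}, *n = 2 (the last element is moved into the popped slot).
 */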
/**
 * calculate_throughput() - calculate the aggregated throughput
 * based on active clients
 *
 * Returns: aggregated tput value
 */
static int calculate_throughput(void)
{
	int client_tput[IPA_PM_MAX_CLIENTS] = { 0 };
	bool group_voted[IPA_PM_GROUP_MAX] = { false };
	int i, n;
	int max, second_max, aggregated_tput;
	struct ipa_pm_client *client;

	/* Create a basic array to hold throughputs */
	for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];
		if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
			/* default case */
			if (client->group == IPA_PM_GROUP_DEFAULT) {
				client_tput[n++] = client->throughput;
			} else if (!group_voted[client->group]) {
				client_tput[n++] = ipa_pm_ctx->group_tput
					[client->group];
				group_voted[client->group] = true;
			}
		}
	}
	/* the array will only use n+1 spots. n will be the last index used */

	aggregated_tput = 0;

	/*
	 * throughput algorithm:
	 * 1) pop the max and second_max
	 * 2) add the 2nd max to the aggregated tput
	 * 3) insert the value of max - 2nd max
	 * 4) repeat until the array is of size 1
	 */
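	/*
	 * Worked example with hypothetical votes [600, 400, 200]:
	 * pop 600 and 400 -> aggregated = 400, push 200 -> [200, 200];
	 * pop 200 and 200 -> aggregated = 600, push 0 -> [0]; the loop ends
	 * and the function returns 600.
	 */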
	while (n > 1) {
		max = pop_max_from_array(client_tput, &n);
		second_max = pop_max_from_array(client_tput, &n);
		client_tput[n++] = max - second_max;
		aggregated_tput += second_max;
	}

	IPA_PM_DBG_LOW("Aggregated throughput: %d\n", aggregated_tput);

	return aggregated_tput;
}

/**
 * deactivate_client() - turn off the bit in the active client bitmask based on
 *	the handle passed in
 * @hdl: The index of the client to be deactivated
 */
static void deactivate_client(u32 hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
	ipa_pm_ctx->clk_scaling.active_client_bitmask &= ~(1 << hdl);
	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
	IPA_PM_DBG_LOW("active bitmask: %x\n",
		ipa_pm_ctx->clk_scaling.active_client_bitmask);
}

/**
 * activate_client() - turn on the bit in the active client bitmask based on
 *	the handle passed in
 * @hdl: The index of the client to be activated
 */
static void activate_client(u32 hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
	ipa_pm_ctx->clk_scaling.active_client_bitmask |= (1 << hdl);
	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
	IPA_PM_DBG_LOW("active bitmask: %x\n",
		ipa_pm_ctx->clk_scaling.active_client_bitmask);
}

/**
 * set_current_threshold() - select the threshold set for the clock plan
 *
 * Points current_threshold at the threshold of the first exception that
 * passes, or at the default threshold if none pass.
 */
static void set_current_threshold(void)
{
	int i;
	struct clk_scaling_db *clk;
	struct ipa_pm_exception_list *exception;
	unsigned long flags;

	clk = &ipa_pm_ctx->clk_scaling;

	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
	for (i = 0; i < clk->exception_size; i++) {
		exception = &clk->exception_list[i];
		if (exception->pending == 0 && (exception->bitmask
			& ~clk->active_client_bitmask) == 0) {
			spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock,
				flags);
			clk->current_threshold = exception->threshold;
			IPA_PM_DBG("Exception %d set\n", i);
			return;
		}
	}
	clk->current_threshold = clk->default_threshold;
	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
}

/**
 * do_clk_scaling() - set the clock based on the activated clients
 *
 * Returns: 0 if success, negative otherwise
 */
static int do_clk_scaling(void)
{
	int i, tput;
	int new_th_idx = 1;
	struct clk_scaling_db *clk_scaling;

	if (atomic_read(&ipa3_ctx->ipa_clk_vote) == 0) {
		IPA_PM_DBG("IPA clock is gated\n");
		return 0;
	}

	clk_scaling = &ipa_pm_ctx->clk_scaling;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	IPA_PM_DBG_LOW("clock scaling started\n");
	tput = calculate_throughput();
	ipa_pm_ctx->aggregated_tput = tput;
	set_current_threshold();

	mutex_unlock(&ipa_pm_ctx->client_mutex);

	for (i = 0; i < clk_scaling->threshold_size; i++) {
		if (tput > clk_scaling->current_threshold[i])
			new_th_idx++;
	}
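	/*
	 * new_th_idx starts at 1 and grows by one for every threshold the
	 * aggregated throughput exceeds; the resulting index is handed to
	 * ipa3_set_clock_plan_from_pm() below as the clock plan vote.
	 */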
	IPA_PM_DBG_LOW("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);

	if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) {
		ipa_pm_ctx->clk_scaling.cur_vote = new_th_idx;
		ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote);
	}

	IPA_PM_DBG_LOW("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);

	return 0;
}

/**
 * clock_scaling_func() - set the clock on a work queue
 */
static void clock_scaling_func(struct work_struct *work)
{
	do_clk_scaling();
}

/**
 * activate_work_func() - activate a client and vote for clock on a work queue
 */
static void activate_work_func(struct work_struct *work)
{
	struct ipa_pm_client *client;
	bool dec_clk = false;
	unsigned long flags;

	client = container_of(work, struct ipa_pm_client, activate_work);
	if (!client->skip_clk_vote) {
		IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_stay_awake(&client->wlock);
	}

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	if (client->state == IPA_PM_ACTIVATE_IN_PROGRESS) {
		client->state = IPA_PM_ACTIVATED;
	} else if (client->state == IPA_PM_DEACTIVATE_IN_PROGRESS) {
		client->state = IPA_PM_DEACTIVATED;
		dec_clk = true;
	} else {
		IPA_PM_ERR("unexpected state %d\n", client->state);
		WARN_ON(1);
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	complete_all(&client->complete);

	if (dec_clk) {
		if (!client->skip_clk_vote) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
			if (client->group == IPA_PM_GROUP_APPS)
				__pm_relax(&client->wlock);
		}

		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		return;
	}

	activate_client(client->hdl);

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (client->callback) {
		client->callback(client->callback_params,
			IPA_PM_CLIENT_ACTIVATED);
	} else {
		IPA_PM_ERR("client has no callback");
		WARN_ON(1);
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

	do_clk_scaling();
}

/**
 * delayed_deferred_deactivate_work_func() - deferred deactivate on a work queue
 */
static void delayed_deferred_deactivate_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_pm_client *client;
	unsigned long flags;
	unsigned long delay;

	dwork = container_of(work, struct delayed_work, work);
	client = container_of(dwork, struct ipa_pm_client, deactivate_work);

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	switch (client->state) {
	case IPA_PM_ACTIVATED_TIMER_SET:
		client->state = IPA_PM_ACTIVATED;
		goto bail;
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
		delay = IPA_PM_DEFERRED_TIMEOUT;
		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
			delay *= 5;

		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
			msecs_to_jiffies(delay));
		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
		goto bail;
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_DEACTIVATED;
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		spin_unlock_irqrestore(&client->state_lock, flags);
		if (!client->skip_clk_vote) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
			if (client->group == IPA_PM_GROUP_APPS)
				__pm_relax(&client->wlock);
		}

		deactivate_client(client->hdl);
		do_clk_scaling();
		return;
	default:
		IPA_PM_ERR("unexpected state %d\n", client->state);
		WARN_ON(1);
		goto bail;
	}

bail:
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
}
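
/*
 * find_next_open_array_element() - look up a free client handle by name
 *
 * Returns the lowest free handle (handle 0 is reserved), -EEXIST if a client
 * with this name is already registered, or -ENOBUFS if the client array is
 * full.
 */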
static int find_next_open_array_element(const char *name)
{
	int i, n;

	n = -ENOBUFS;

	/* 0 is not a valid handle */
	for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) {
		if (ipa_pm_ctx->clients[i] == NULL) {
			n = i;
			continue;
		}

		if (strlen(name) == strlen(ipa_pm_ctx->clients[i]->name))
			if (!strcmp(name, ipa_pm_ctx->clients[i]->name))
				return -EEXIST;
	}
	return n;
}

/**
 * add_client_to_exception_list() - add client to the exception list and
 *	update pending if necessary
 * @hdl: index of the IPA client
 *
 * Returns: 0 if success, negative otherwise
 */
static int add_client_to_exception_list(u32 hdl)
{
	int i;
	struct ipa_pm_exception_list *exception;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name,
			strlen(exception->clients))) {
			exception->pending--;

			if (exception->pending < 0) {
				WARN_ON(1);
				exception->pending = 0;
				mutex_unlock(&ipa_pm_ctx->client_mutex);
				return -EPERM;
			}
			exception->bitmask |= (1 << hdl);
		}
	}
	IPA_PM_DBG("%s added to exception list\n",
		ipa_pm_ctx->clients[hdl]->name);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return 0;
}
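
/*
 * Note: unlike add_client_to_exception_list(), the remove function below
 * expects the caller to already hold ipa_pm_ctx->client_mutex (as
 * ipa_pm_deregister() does).
 */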
/**
 * remove_client_from_exception_list() - remove client from the exception list
 *	and update pending if necessary
 * @hdl: index of the IPA client
 *
 * Returns: 0 if success, negative otherwise
 */
static int remove_client_from_exception_list(u32 hdl)
{
	int i;
	struct ipa_pm_exception_list *exception;

	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		if (exception->bitmask & (1 << hdl)) {
			exception->pending++;
			exception->bitmask &= ~(1 << hdl);
		}
	}
	IPA_PM_DBG("Client %d removed from exception list\n", hdl);

	return 0;
}
/**
 * ipa_pm_init() - initialize IPA PM components
 * @params: parameters needed to fill the exceptions and thresholds
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_init(struct ipa_pm_init_params *params)
{
	int i, j;
	struct clk_scaling_db *clk_scaling;

	if (params == NULL) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}

	if (params->threshold_size <= 0
		|| params->threshold_size > IPA_PM_THRESHOLD_MAX) {
		IPA_PM_ERR("Invalid threshold size\n");
		return -EINVAL;
	}

	if (params->exception_size < 0
		|| params->exception_size > IPA_PM_EXCEPTION_MAX) {
		IPA_PM_ERR("Invalid exception size\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM initialization started\n");

	if (ipa_pm_ctx != NULL) {
		IPA_PM_ERR("Already initialized\n");
		return -EPERM;
	}

	ipa_pm_ctx = kzalloc(sizeof(*ipa_pm_ctx), GFP_KERNEL);
	if (!ipa_pm_ctx) {
		IPA_PM_ERR(":kzalloc err.\n");
		return -ENOMEM;
	}

	ipa_pm_ctx->wq = create_singlethread_workqueue("ipa_pm_activate");
	if (!ipa_pm_ctx->wq) {
		IPA_PM_ERR("create workqueue failed\n");
		kfree(ipa_pm_ctx);
		return -ENOMEM;
	}

	mutex_init(&ipa_pm_ctx->client_mutex);

	/* Populate and init locks in clk_scaling_db */
	clk_scaling = &ipa_pm_ctx->clk_scaling;
	spin_lock_init(&clk_scaling->lock);
	clk_scaling->threshold_size = params->threshold_size;
	clk_scaling->exception_size = params->exception_size;
	INIT_WORK(&clk_scaling->work, clock_scaling_func);

	for (i = 0; i < params->threshold_size; i++)
		clk_scaling->default_threshold[i] =
			params->default_threshold[i];

	/* Populate the exception list */
	for (i = 0; i < params->exception_size; i++) {
		strlcpy(clk_scaling->exception_list[i].clients,
			params->exceptions[i].usecase, IPA_PM_MAX_EX_CL);
		IPA_PM_DBG("Usecase: %s\n", params->exceptions[i].usecase);

		/* Parse the commas to count the size of the clients */
		for (j = 0; j < IPA_PM_MAX_EX_CL &&
			clk_scaling->exception_list[i].clients[j]; j++) {
			if (clk_scaling->exception_list[i].clients[j] == ',')
				clk_scaling->exception_list[i].pending++;
		}
		clk_scaling->exception_list[i].pending++;
		IPA_PM_DBG("Pending: %d\n",
			clk_scaling->exception_list[i].pending);

		/* populate the threshold */
		for (j = 0; j < params->threshold_size; j++) {
			clk_scaling->exception_list[i].threshold[j]
				= params->exceptions[i].threshold[j];
		}
	}
	IPA_PM_DBG("initialization success");

	return 0;
}

int ipa_pm_destroy(void)
{
	IPA_PM_DBG("IPA PM destroy started\n");

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("Already destroyed\n");
		return -EPERM;
	}

	destroy_workqueue(ipa_pm_ctx->wq);
	kfree(ipa_pm_ctx);
	ipa_pm_ctx = NULL;

	return 0;
}

/**
 * ipa_pm_register() - register an IPA PM client with the PM
 * @params: params for a client like throughput, callback, etc.
 * @hdl: int pointer that will be used as an index to access the client
 *
 * Returns: 0 on success, negative on failure
 *
 * Side effects: *hdl is replaced with the client index or -EEXIST if the
 * client is already registered
 */
int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
{
	struct ipa_pm_client *client;
	struct wakeup_source *wlock;
	int elem;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (params == NULL || hdl == NULL || params->name == NULL) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM registering client\n");

	mutex_lock(&ipa_pm_ctx->client_mutex);

	elem = find_next_open_array_element(params->name);
	*hdl = elem;
	if (elem < 0 || elem > IPA_PM_MAX_CLIENTS) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("client already registered or full array elem=%d\n",
			elem);
		return elem;
	}

	ipa_pm_ctx->clients[*hdl] = kzalloc(sizeof
		(struct ipa_pm_client), GFP_KERNEL);
	if (!ipa_pm_ctx->clients[*hdl]) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR(":kzalloc err.\n");
		return -ENOMEM;
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	client = ipa_pm_ctx->clients[*hdl];

	spin_lock_init(&client->state_lock);

	INIT_DELAYED_WORK(&client->deactivate_work,
		delayed_deferred_deactivate_work_func);

	INIT_WORK(&client->activate_work, activate_work_func);

	/* populate fields */
	strlcpy(client->name, params->name, IPA_PM_MAX_EX_CL);
	client->callback = params->callback;
	client->callback_params = params->user_data;
	client->group = params->group;
	client->hdl = *hdl;
	client->skip_clk_vote = params->skip_clk_vote;
	wlock = &client->wlock;
	wakeup_source_init(wlock, client->name);
	init_completion(&client->complete);

	/* add client to the exception list */
	if (add_client_to_exception_list(*hdl)) {
		ipa_pm_deregister(*hdl);
		IPA_PM_ERR("Fail to add client to exception_list\n");
		return -EPERM;
	}

	IPA_PM_DBG("IPA PM client registered with handle %d\n", *hdl);

	return 0;
}
/**
 * ipa_pm_deregister() - deregister an IPA client from the PM
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deregister(u32 hdl)
{
	struct ipa_pm_client *client;
	int i;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	if (ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Client is Null\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM deregistering client\n");

	client = ipa_pm_ctx->clients[hdl];
	spin_lock_irqsave(&client->state_lock, flags);
	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		wait_for_completion(&client->complete);
		spin_lock_irqsave(&client->state_lock, flags);
	}

	if (IPA_PM_STATE_ACTIVE(client->state)) {
		IPA_PM_DBG("Activated clients cannot be deregistered");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EPERM;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	mutex_lock(&ipa_pm_ctx->client_mutex);

	/* nullify pointers in the pipe array */
	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
		if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl])
			ipa_pm_ctx->clients_by_pipe[i] = NULL;
	}
	wakeup_source_trash(&client->wlock);
	kfree(client);
	ipa_pm_ctx->clients[hdl] = NULL;

	remove_client_from_exception_list(hdl);
	IPA_PM_DBG("IPA PM client %d deregistered\n", hdl);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return 0;
}

/**
 * ipa_pm_associate_ipa_cons_to_client() - add a mapping from a pipe to an
 *	IPA client
 * @hdl: index of the client to be mapped
 * @consumer: the pipe/consumer name to be mapped to the client
 *
 * Returns: 0 on success, negative on failure
 *
 * Side effects: multiple pipes are allowed to be mapped to a single client
 */
int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
{
	int idx;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 ||
		consumer >= IPA_CLIENT_MAX) {
		IPA_PM_ERR("invalid params\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (ipa_pm_ctx->clients[hdl] == NULL) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("Client is NULL\n");
		return -EPERM;
	}

	idx = ipa_get_ep_mapping(consumer);

	if (idx < 0) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_DBG("Pipe is not used\n");
		return 0;
	}

	IPA_PM_DBG("Mapping pipe %d to client %d\n", idx, hdl);

	if (ipa_pm_ctx->clients_by_pipe[idx] != NULL) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("Pipe is already mapped\n");
		return -EPERM;
	}
	ipa_pm_ctx->clients_by_pipe[idx] = ipa_pm_ctx->clients[hdl];
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	IPA_PM_DBG("Pipe %d is mapped to client %d\n", idx, hdl);

	return 0;
}
static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
{
	struct ipa_active_client_logging_info log_info;
	int result = 0;
	unsigned long flags;

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
		if (sync) {
			spin_unlock_irqrestore(&client->state_lock, flags);
			wait_for_completion(&client->complete);
			spin_lock_irqsave(&client->state_lock, flags);
		} else {
			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
			spin_unlock_irqrestore(&client->state_lock, flags);
			return -EINPROGRESS;
		}
	}

	switch (client->state) {
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_ACTIVATED_TIMER_SET;
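		/* fall through: the client already holds its vote, just return */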
	case IPA_PM_ACTIVATED:
	case IPA_PM_ACTIVATED_TIMER_SET:
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	case IPA_PM_DEACTIVATED:
		break;
	default:
		IPA_PM_ERR("Invalid State\n");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EPERM;
	}
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, client->name);
	if (!client->skip_clk_vote) {
		if (sync) {
			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
			spin_unlock_irqrestore(&client->state_lock, flags);
			IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
			spin_lock_irqsave(&client->state_lock, flags);
		} else
			result = ipa3_inc_client_enable_clks_no_block
				(&log_info);
	}

	/* we got the clocks */
	if (result == 0) {
		client->state = IPA_PM_ACTIVATED;
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_stay_awake(&client->wlock);
		spin_unlock_irqrestore(&client->state_lock, flags);
		activate_client(client->hdl);
		if (sync)
			do_clk_scaling();
		else
			queue_work(ipa_pm_ctx->wq,
				&ipa_pm_ctx->clk_scaling.work);
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		return 0;
	}

	client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
	reinit_completion(&client->complete);
	queue_work(ipa_pm_ctx->wq, &client->activate_work);
	spin_unlock_irqrestore(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	return -EINPROGRESS;
}

/**
 * ipa_pm_activate() - activate an IPA client to vote for clock. Can be called
 *	from atomic context and returns -EINPROGRESS if it cannot be done
 *	synchronously
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, -EINPROGRESS if the operation cannot be done
 * synchronously, and other negatives on failure
 */
int ipa_pm_activate(u32 hdl)
{
	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], false);
}
/**
 * ipa_pm_activate_sync() - activate an IPA client to vote for clock
 *	synchronously. Cannot be called from an atomic context.
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_activate_sync(u32 hdl)
{
	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], true);
}

/**
 * ipa_pm_deferred_deactivate() - schedule a timer to deactivate a client and
 *	devote the clock. Can be called from atomic context (asynchronously).
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deferred_deactivate(u32 hdl)
{
	struct ipa_pm_client *client;
	unsigned long flags;
	unsigned long delay;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	client = ipa_pm_ctx->clients[hdl];
	IPA_PM_DBG_STATE(hdl, client->name, client->state);

	spin_lock_irqsave(&client->state_lock, flags);
	switch (client->state) {
	case IPA_PM_ACTIVATE_IN_PROGRESS:
		client->state = IPA_PM_DEACTIVATE_IN_PROGRESS;
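		/* fall through: activate_work_func() will finish the deactivation */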
	case IPA_PM_DEACTIVATED:
		IPA_PM_DBG_STATE(hdl, client->name, client->state);
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	case IPA_PM_ACTIVATED:
		delay = IPA_PM_DEFERRED_TIMEOUT;
		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
			delay *= 5;

		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
			msecs_to_jiffies(delay));
		break;
	case IPA_PM_ACTIVATED_TIMER_SET:
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_ACTIVATED_PENDING_RESCHEDULE;
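		/* fall through */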
	case IPA_PM_DEACTIVATE_IN_PROGRESS:
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
		break;
	case IPA_PM_STATE_MAX:
	default:
		IPA_PM_ERR("Bad State");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EINVAL;
	}
	IPA_PM_DBG_STATE(hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);

	return 0;
}
/**
 * ipa_pm_deactivate_all_deferred() - cancel the deferred deactivation timers
 *	and immediately devote the IPA clocks
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deactivate_all_deferred(void)
{
	int i;
	bool run_algorithm = false;
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];

		if (client == NULL)
			continue;

		cancel_delayed_work_sync(&client->deactivate_work);

		if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
			wait_for_completion(&client->complete);
			continue;
		}

		spin_lock_irqsave(&client->state_lock, flags);
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

		if (client->state == IPA_PM_ACTIVATED_TIMER_SET) {
			client->state = IPA_PM_ACTIVATED;
			IPA_PM_DBG_STATE(client->hdl, client->name,
				client->state);
			spin_unlock_irqrestore(&client->state_lock, flags);
		} else if (client->state ==
				IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
			client->state ==
				IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
			run_algorithm = true;
			client->state = IPA_PM_DEACTIVATED;
			IPA_PM_DBG_STATE(client->hdl, client->name,
				client->state);
			spin_unlock_irqrestore(&client->state_lock, flags);
			if (!client->skip_clk_vote) {
				IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
				if (client->group == IPA_PM_GROUP_APPS)
					__pm_relax(&client->wlock);
			}

			deactivate_client(client->hdl);
		} else /* if activated or deactivated, we do nothing */
			spin_unlock_irqrestore(&client->state_lock, flags);
	}

	if (run_algorithm)
		do_clk_scaling();

	return 0;
}
/**
 * ipa_pm_deactivate_sync() - deactivate an IPA client and devote the clock.
 *	Cannot be called from atomic context.
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deactivate_sync(u32 hdl)
{
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}
	client = ipa_pm_ctx->clients[hdl];

	cancel_delayed_work_sync(&client->deactivate_work);

	if (IPA_PM_STATE_IN_PROGRESS(client->state))
		wait_for_completion(&client->complete);

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(hdl, client->name, client->state);

	if (client->state == IPA_PM_DEACTIVATED) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	/* else case (deactivates all activated cases) */
	if (!client->skip_clk_vote) {
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_relax(&client->wlock);
	}

	spin_lock_irqsave(&client->state_lock, flags);
	client->state = IPA_PM_DEACTIVATED;
	IPA_PM_DBG_STATE(hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
	deactivate_client(hdl);
	do_clk_scaling();

	return 0;
}

/**
 * ipa_pm_handle_suspend() - calls the callbacks of suspended clients to wake up
 * @pipe_bitmask: the bits represent the indexes of the clients to be woken up
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_handle_suspend(u32 pipe_bitmask)
{
	int i;
	struct ipa_pm_client *client;
	bool client_notified[IPA_PM_MAX_CLIENTS] = { false };

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	IPA_PM_DBG_LOW("bitmask: %d", pipe_bitmask);

	if (pipe_bitmask == 0)
		return 0;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
		if (pipe_bitmask & (1 << i)) {
			client = ipa_pm_ctx->clients_by_pipe[i];
			if (client && !client_notified[client->hdl]) {
				if (client->callback) {
					client->callback(client->callback_params
						, IPA_PM_REQUEST_WAKEUP);
					client_notified[client->hdl] = true;
				} else {
					IPA_PM_ERR("client has no callback");
					WARN_ON(1);
				}
			}
		}
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);
	return 0;
}

/**
 * ipa_pm_set_throughput() - adds/changes the throughput requirement to IPA PM
 *	to be used for clock scaling
 * @hdl: index of the client in the array
 * @throughput: the new throughput value to be set for that client
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_set_throughput(u32 hdl, int throughput)
{
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL
		|| throughput < 0) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}
	client = ipa_pm_ctx->clients[hdl];

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (client->group == IPA_PM_GROUP_DEFAULT)
		IPA_PM_DBG_LOW("Old throughput: %d\n", client->throughput);
	else
		IPA_PM_DBG_LOW("old Group %d throughput: %d\n",
			client->group, ipa_pm_ctx->group_tput[client->group]);

	if (client->group == IPA_PM_GROUP_DEFAULT)
		client->throughput = throughput;
	else
		ipa_pm_ctx->group_tput[client->group] = throughput;

	if (client->group == IPA_PM_GROUP_DEFAULT)
		IPA_PM_DBG_LOW("New throughput: %d\n", client->throughput);
	else
		IPA_PM_DBG_LOW("New Group %d throughput: %d\n",
			client->group, ipa_pm_ctx->group_tput[client->group]);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	spin_lock_irqsave(&client->state_lock, flags);
	if (IPA_PM_STATE_ACTIVE(client->state) || (client->group !=
		IPA_PM_GROUP_DEFAULT)) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		do_clk_scaling();
		return 0;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	return 0;
}

void ipa_pm_set_clock_index(int index)
{
	if (ipa_pm_ctx && index >= 0)
		ipa_pm_ctx->clk_scaling.cur_vote = index;

	IPA_PM_DBG("Setting pm clock vote to %d\n", index);
}
/**
 * ipa_pm_stat() - print PM stat
 * @buf: [in] The user buffer used to print
 * @size: [in] The size of buf
 * Returns: number of bytes used on success, negative on failure
 *
 * This function is called by ipa_debugfs in order to receive
 * a picture of the clients in the PM and the throughput, threshold and cur vote
 */
int ipa_pm_stat(char *buf, int size)
{
	struct ipa_pm_client *client;
	struct clk_scaling_db *clk = &ipa_pm_ctx->clk_scaling;
	int i, j, tput, cnt = 0, result = 0;
	unsigned long flags;

	if (!buf || size < 0)
		return -EINVAL;

	mutex_lock(&ipa_pm_ctx->client_mutex);

	result = scnprintf(buf + cnt, size - cnt, "\n\nCurrent threshold: [");
	cnt += result;

	for (i = 0; i < clk->threshold_size; i++) {
		result = scnprintf(buf + cnt, size - cnt,
			"%d, ", clk->current_threshold[i]);
		cnt += result;
	}

	result = scnprintf(buf + cnt, size - cnt, "\b\b]\n");
	cnt += result;

	result = scnprintf(buf + cnt, size - cnt,
		"Aggregated tput: %d, Cur vote: %d",
		ipa_pm_ctx->aggregated_tput, clk->cur_vote);
	cnt += result;

	result = scnprintf(buf + cnt, size - cnt, "\n\nRegistered Clients:\n");
	cnt += result;

	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];

		if (client == NULL)
			continue;

		spin_lock_irqsave(&client->state_lock, flags);
		if (client->group == IPA_PM_GROUP_DEFAULT)
			tput = client->throughput;
		else
			tput = ipa_pm_ctx->group_tput[client->group];

		result = scnprintf(buf + cnt, size - cnt,
			"Client[%d]: %s State:%s\nGroup: %s Throughput: %d Pipes: ",
			i, client->name, client_state_to_str[client->state],
			ipa_pm_group_to_str[client->group], tput);
		cnt += result;

		for (j = 0; j < IPA3_MAX_NUM_PIPES; j++) {
			if (ipa_pm_ctx->clients_by_pipe[j] == client) {
				result = scnprintf(buf + cnt, size - cnt,
					"%d, ", j);
				cnt += result;
			}
		}

		result = scnprintf(buf + cnt, size - cnt, "\b\b\n\n");
		cnt += result;
		spin_unlock_irqrestore(&client->state_lock, flags);
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return cnt;
}

/**
 * ipa_pm_exceptions_stat() - print PM exceptions stat
 * @buf: [in] The user buffer used to print
 * @size: [in] The size of buf
 * Returns: number of bytes used on success, negative on failure
 *
 * This function is called by ipa_debugfs in order to receive
 * a full picture of the exceptions in the PM
 */
int ipa_pm_exceptions_stat(char *buf, int size)
{
	int i, j, cnt = 0, result = 0;
	struct ipa_pm_exception_list *exception;

	if (!buf || size < 0)
		return -EINVAL;

	result = scnprintf(buf + cnt, size - cnt, "\n");
	cnt += result;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		if (exception == NULL) {
			result = scnprintf(buf + cnt, size - cnt,
				"Exception %d is NULL\n\n", i);
			cnt += result;
			continue;
		}

		result = scnprintf(buf + cnt, size - cnt,
			"Exception %d: %s\nPending: %d Bitmask: %d Threshold: ["
			, i, exception->clients, exception->pending,
			exception->bitmask);
		cnt += result;

		for (j = 0; j < ipa_pm_ctx->clk_scaling.threshold_size; j++) {
			result = scnprintf(buf + cnt, size - cnt,
				"%d, ", exception->threshold[j]);
			cnt += result;
		}

		result = scnprintf(buf + cnt, size - cnt, "\b\b]\n\n");
		cnt += result;
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return cnt;
}