ipa_pm.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include "ipa_pm.h"
  7. #include "ipa_i.h"
#define IPA_PM_DRV_NAME "ipa_pm"

/* Debug log: kernel log plus regular and low-priority IPC log buffers */
#define IPA_PM_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Verbose debug log: kernel log plus low-priority IPC log buffer only */
#define IPA_PM_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Error log: kernel log plus both IPC log buffers */
#define IPA_PM_ERR(fmt, args...) \
	do { \
		pr_err(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Log a client's PM state transition using its printable state name */
#define IPA_PM_DBG_STATE(hdl, name, state) \
	IPA_PM_DBG_LOW("Client[%d] %s: %s\n", hdl, name, \
		client_state_to_str[state])

/* Client handles are tracked in u32 bitmasks; >32 clients would overflow */
#if IPA_PM_MAX_CLIENTS > 32
#error max client greater than 32 all bitmask types should be changed
#endif
/*
 * struct ipa_pm_exception_list - holds information about an exception
 * @clients: comma-separated usecase string naming the clients in the exception
 * @pending: number of clients in exception that have not yet been activated
 * @bitmask: bitmask of the clients in the exception based on handle
 * @threshold: the threshold values for the exception
 */
struct ipa_pm_exception_list {
	char clients[IPA_PM_MAX_EX_CL];
	int pending;
	u32 bitmask;
	int threshold[IPA_PM_THRESHOLD_MAX];
};
/*
 * struct clk_scaling_db - holds information about thresholds and exceptions
 * @lock: lock the bitmasks and thresholds
 * @exception_list: array of exceptions, sized by @exception_size
 * @work: work for clock scaling algorithm
 * @active_client_bitmask: the bits represent handles in the clients array that
 * contain non-null client
 * @threshold_size: size of the throughput threshold
 * @exception_size: size of the exception list
 * @cur_vote: idx of the threshold
 * @default_threshold: the thresholds used if no exception passes
 * @current_threshold: the current threshold of the clock plan
 */
struct clk_scaling_db {
	spinlock_t lock;
	struct ipa_pm_exception_list exception_list[IPA_PM_EXCEPTION_MAX];
	struct work_struct work;
	u32 active_client_bitmask;
	int threshold_size;
	int exception_size;
	int cur_vote;
	int default_threshold[IPA_PM_THRESHOLD_MAX];
	int *current_threshold;
};
/*
 * ipa_pm state names
 *
 * Timer free states:
 * @IPA_PM_DEACTIVATED: client starting state when registered
 * @IPA_PM_DEACTIVATE_IN_PROGRESS: deactivate was called in progress of a client
 * activating
 * @IPA_PM_ACTIVATE_IN_PROGRESS: client is being activated by work_queue
 * @IPA_PM_ACTIVATED: client is activated without any timers
 *
 * Timer set states:
 * @IPA_PM_ACTIVATED_PENDING_DEACTIVATION: moves to deactivate once timer pass
 * @IPA_PM_ACTIVATED_TIMER_SET: client was activated while timer was set, so
 * when the timer pass, client will still be activated
 * @IPA_PM_ACTIVATED_PENDING_RESCHEDULE: state signifying extended timer when
 * a client is deferred_deactivated when a timer is still active
 */
enum ipa_pm_state {
	IPA_PM_DEACTIVATED,
	IPA_PM_DEACTIVATE_IN_PROGRESS,
	IPA_PM_ACTIVATE_IN_PROGRESS,
	IPA_PM_ACTIVATED,
	IPA_PM_ACTIVATED_PENDING_DEACTIVATION,
	IPA_PM_ACTIVATED_TIMER_SET,
	IPA_PM_ACTIVATED_PENDING_RESCHEDULE,
	IPA_PM_STATE_MAX
};
  103. #define IPA_PM_STATE_ACTIVE(state) \
  104. (state == IPA_PM_ACTIVATED ||\
  105. state == IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||\
  106. state == IPA_PM_ACTIVATED_TIMER_SET ||\
  107. state == IPA_PM_ACTIVATED_PENDING_RESCHEDULE)
  108. #define IPA_PM_STATE_IN_PROGRESS(state) \
  109. (state == IPA_PM_ACTIVATE_IN_PROGRESS \
  110. || state == IPA_PM_DEACTIVATE_IN_PROGRESS)
/*
 * struct ipa_pm_client - holds information about a specific IPA client
 * @name: string name of the client
 * @callback: pointer to the client's callback function
 * @callback_params: pointer to the client's callback parameters
 * @state: Activation state of the client
 * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
 * @group: the ipa_pm_group the client belongs to
 * @hdl: handle of the client
 * @throughput: the throughput of the client for clock scaling
 * @state_lock: spinlock to lock the pm_states
 * @activate_work: work for activate (blocking case)
 * @deactivate_work: delayed work for deferred_deactivate function
 * @complete: generic wait-for-completion handler
 * @wlock: wake source to prevent AP suspend
 */
struct ipa_pm_client {
	char name[IPA_PM_MAX_EX_CL];
	void (*callback)(void *user_data, enum ipa_pm_cb_event);
	void *callback_params;
	enum ipa_pm_state state;
	bool skip_clk_vote;
	int group;
	int hdl;
	int throughput;
	spinlock_t state_lock;
	struct work_struct activate_work;
	struct delayed_work deactivate_work;
	struct completion complete;
	struct wakeup_source *wlock;
};
/*
 * struct ipa_pm_ctx - global ctx that will hold the client arrays and tput info
 * @clients: array to the clients with the handle as its index
 * @clients_by_pipe: array to the clients with endpoint as the index
 * @wq: work queue for deferred deactivate, activate, and clk_scaling work
 * @clk_scaling: clock scaling database
 * @client_mutex: global mutex to lock the client arrays
 * @aggregated_tput: aggregated tput value of all valid activated clients
 * @group_tput: combined throughput for the groups
 */
struct ipa_pm_ctx {
	struct ipa_pm_client *clients[IPA_PM_MAX_CLIENTS];
	struct ipa_pm_client *clients_by_pipe[IPA3_MAX_NUM_PIPES];
	struct workqueue_struct *wq;
	struct clk_scaling_db clk_scaling;
	struct mutex client_mutex;
	int aggregated_tput;
	int group_tput[IPA_PM_GROUP_MAX];
};
/* Global PM context; allocated in ipa_pm_init(), freed in ipa_pm_destroy() */
static struct ipa_pm_ctx *ipa_pm_ctx;

/* Printable names for enum ipa_pm_state, indexed by state value */
static const char *client_state_to_str[IPA_PM_STATE_MAX] = {
	__stringify(IPA_PM_DEACTIVATED),
	__stringify(IPA_PM_DEACTIVATE_IN_PROGRESS),
	__stringify(IPA_PM_ACTIVATE_IN_PROGRESS),
	__stringify(IPA_PM_ACTIVATED),
	__stringify(IPA_PM_ACTIVATED_PENDING_DEACTIVATION),
	__stringify(IPA_PM_ACTIVATED_TIMER_SET),
	__stringify(IPA_PM_ACTIVATED_PENDING_RESCHEDULE),
};

/* Printable names for enum ipa_pm_group, indexed by group value */
static const char *ipa_pm_group_to_str[IPA_PM_GROUP_MAX] = {
	__stringify(IPA_PM_GROUP_DEFAULT),
	__stringify(IPA_PM_GROUP_APPS),
	__stringify(IPA_PM_GROUP_MODEM),
};
  176. /**
  177. * pop_max_from_array() -pop the max and move the last element to where the
  178. * max was popped
  179. * @arr: array to be searched for max
  180. * @n: size of the array
  181. *
  182. * Returns: max value of the array
  183. */
  184. static int pop_max_from_array(int *arr, int *n)
  185. {
  186. int i;
  187. int max, max_idx;
  188. max_idx = *n - 1;
  189. max = 0;
  190. if (*n == 0)
  191. return 0;
  192. for (i = 0; i < *n; i++) {
  193. if (arr[i] > max) {
  194. max = arr[i];
  195. max_idx = i;
  196. }
  197. }
  198. (*n)--;
  199. arr[max_idx] = arr[*n];
  200. return max;
  201. }
/**
 * calculate_throughput() - calculate the aggregated throughput
 * based on active clients
 *
 * Each active DEFAULT-group client contributes its own throughput; each
 * other group contributes its group_tput exactly once, no matter how many
 * of its clients are active. Caller holds client_mutex (see do_clk_scaling).
 *
 * Returns: aggregated tput value
 */
static int calculate_throughput(void)
{
	int client_tput[IPA_PM_MAX_CLIENTS] = { 0 };
	bool group_voted[IPA_PM_GROUP_MAX] = { false };
	int i, n;
	int max, second_max, aggregated_tput;
	struct ipa_pm_client *client;

	/* Create a basic array to hold throughputs; handle 0 is not valid */
	for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];
		if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
			/* default case: each client votes individually */
			if (client->group == IPA_PM_GROUP_DEFAULT) {
				client_tput[n++] = client->throughput;
			} else if (!group_voted[client->group]) {
				/* non-default groups vote once per group */
				client_tput[n++] = ipa_pm_ctx->group_tput
					[client->group];
				group_voted[client->group] = true;
			}
		}
	}
	/* the array will only use n+1 spots. n will be the last index used */

	aggregated_tput = 0;

	/**
	 * throughput algorithm:
	 * 1) pop the max and second_max
	 * 2) add the 2nd max to aggregated tput
	 * 3) insert the value of max - 2nd max
	 * 4) repeat until array is of size 1
	 */
	while (n > 1) {
		max = pop_max_from_array(client_tput, &n);
		second_max = pop_max_from_array(client_tput, &n);
		client_tput[n++] = max - second_max;
		aggregated_tput += second_max;
	}

	IPA_PM_DBG_LOW("Aggregated throughput: %d\n", aggregated_tput);

	return aggregated_tput;
}
  247. /**
  248. * deactivate_client() - turn off the bit in the active client bitmask based on
  249. * the handle passed in
  250. * @hdl: The index of the client to be deactivated
  251. */
  252. static void deactivate_client(u32 hdl)
  253. {
  254. unsigned long flags;
  255. spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
  256. ipa_pm_ctx->clk_scaling.active_client_bitmask &= ~(1 << hdl);
  257. spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
  258. IPA_PM_DBG_LOW("active bitmask: %x\n",
  259. ipa_pm_ctx->clk_scaling.active_client_bitmask);
  260. }
  261. /**
  262. * activate_client() - turn on the bit in the active client bitmask based on
  263. * the handle passed in
  264. * @hdl: The index of the client to be activated
  265. */
  266. static void activate_client(u32 hdl)
  267. {
  268. unsigned long flags;
  269. spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
  270. ipa_pm_ctx->clk_scaling.active_client_bitmask |= (1 << hdl);
  271. spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
  272. IPA_PM_DBG_LOW("active bitmask: %x\n",
  273. ipa_pm_ctx->clk_scaling.active_client_bitmask);
  274. }
  275. /**
  276. * deactivate_client() - get threshold
  277. *
  278. * Returns: threshold of the exception that passes or default if none pass
  279. */
  280. static void set_current_threshold(void)
  281. {
  282. int i;
  283. struct clk_scaling_db *clk;
  284. struct ipa_pm_exception_list *exception;
  285. unsigned long flags;
  286. clk = &ipa_pm_ctx->clk_scaling;
  287. spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
  288. for (i = 0; i < clk->exception_size; i++) {
  289. exception = &clk->exception_list[i];
  290. if (exception->pending == 0 && (exception->bitmask
  291. & ~clk->active_client_bitmask) == 0) {
  292. spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock,
  293. flags);
  294. clk->current_threshold = exception->threshold;
  295. IPA_PM_DBG("Exception %d set\n", i);
  296. return;
  297. }
  298. }
  299. clk->current_threshold = clk->default_threshold;
  300. spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
  301. }
/**
 * do_clk_scaling() - set the clock based on the activated clients
 *
 * Recomputes the aggregated throughput, selects the current threshold
 * table, derives a vote index from how many thresholds the throughput
 * meets, and forwards a changed vote to the clock framework.
 *
 * Returns: 0 if success, negative otherwise
 */
static int do_clk_scaling(void)
{
	int i, tput;
	int new_th_idx = 1;
	struct clk_scaling_db *clk_scaling;

	/* nothing to scale while the IPA clock is gated */
	if (atomic_read(&ipa3_ctx->ipa_clk_vote) == 0) {
		IPA_PM_DBG("IPA clock is gated\n");
		return 0;
	}

	clk_scaling = &ipa_pm_ctx->clk_scaling;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	IPA_PM_DBG_LOW("clock scaling started\n");
	tput = calculate_throughput();
	ipa_pm_ctx->aggregated_tput = tput;
	set_current_threshold();
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	/* vote index starts at 1; each threshold met bumps it one level */
	for (i = 0; i < clk_scaling->threshold_size; i++) {
		if (tput >= clk_scaling->current_threshold[i])
			new_th_idx++;
	}

	IPA_PM_DBG_LOW("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);

	/* only bother the clock framework when the vote actually changes */
	if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) {
		ipa_pm_ctx->clk_scaling.cur_vote = new_th_idx;
		ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote);
	}

	IPA_PM_DBG_LOW("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);

	return 0;
}
/**
 * clock_scaling_func() - set the clock on a work queue
 * @work: embedded work item in clk_scaling_db (unused)
 */
static void clock_scaling_func(struct work_struct *work)
{
	do_clk_scaling();
}
/**
 * activate_work_func - activate a client and vote for clock on a work queue
 *
 * Completes an asynchronous activation: votes for the IPA clock (unless
 * the client skips clock voting), moves the client to ACTIVATED, fires
 * the client's ACTIVATED callback and reruns clock scaling. If a
 * deactivate raced in while activation was in progress, the vote is
 * undone instead.
 */
static void activate_work_func(struct work_struct *work)
{
	struct ipa_pm_client *client;
	bool dec_clk = false;
	unsigned long flags;

	client = container_of(work, struct ipa_pm_client, activate_work);
	if (!client->skip_clk_vote) {
		IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
		/* APPS-group clients also hold a wakeup source */
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_stay_awake(client->wlock);
	}

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	if (client->state == IPA_PM_ACTIVATE_IN_PROGRESS) {
		client->state = IPA_PM_ACTIVATED;
	} else if (client->state == IPA_PM_DEACTIVATE_IN_PROGRESS) {
		/* a deactivate was requested while we were activating */
		client->state = IPA_PM_DEACTIVATED;
		dec_clk = true;
	} else {
		IPA_PM_ERR("unexpected state %d\n", client->state);
		WARN_ON(1);
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	/* wake anyone blocked in a synchronous activate or deregister */
	complete_all(&client->complete);

	if (dec_clk) {
		/* activation was cancelled: drop the vote taken above */
		if (!client->skip_clk_vote) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
			if (client->group == IPA_PM_GROUP_APPS)
				__pm_relax(client->wlock);
		}

		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		return;
	}

	activate_client(client->hdl);

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (client->callback) {
		client->callback(client->callback_params,
			IPA_PM_CLIENT_ACTIVATED);
	} else {
		IPA_PM_ERR("client has no callback");
		WARN_ON(1);
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	do_clk_scaling();
}
/**
 * delayed_deferred_deactivate_work_func - deferred deactivate on a work queue
 *
 * Runs when a client's deactivation timer fires. Depending on what
 * happened while the timer was pending, either leaves the client
 * activated, re-arms the timer, or fully deactivates the client
 * (dropping the clock vote and wakeup source) and reruns clock scaling.
 */
static void delayed_deferred_deactivate_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_pm_client *client;
	unsigned long flags;
	unsigned long delay;

	dwork = container_of(work, struct delayed_work, work);
	client = container_of(dwork, struct ipa_pm_client, deactivate_work);

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	switch (client->state) {
	case IPA_PM_ACTIVATED_TIMER_SET:
		/* client was re-activated while the timer ran; stay active */
		client->state = IPA_PM_ACTIVATED;
		goto bail;
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
		/* a deferred deactivate arrived mid-timer; extend the timer */
		delay = IPA_PM_DEFERRED_TIMEOUT;
		/* virtual/emulation targets run slower; stretch the timeout */
		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
			delay *= 5;

		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
			msecs_to_jiffies(delay));
		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
		goto bail;
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		/* timer expired with no re-activation: deactivate for real */
		client->state = IPA_PM_DEACTIVATED;
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		spin_unlock_irqrestore(&client->state_lock, flags);
		if (!client->skip_clk_vote) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
			if (client->group == IPA_PM_GROUP_APPS)
				__pm_relax(client->wlock);
		}

		deactivate_client(client->hdl);
		do_clk_scaling();
		return;
	default:
		IPA_PM_ERR("unexpected state %d\n", client->state);
		WARN_ON(1);
		goto bail;
	}
bail:
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
}
  438. static int find_next_open_array_element(const char *name)
  439. {
  440. int i, n;
  441. n = -ENOBUFS;
  442. /* 0 is not a valid handle */
  443. for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) {
  444. if (ipa_pm_ctx->clients[i] == NULL) {
  445. n = i;
  446. continue;
  447. }
  448. if (strlen(name) == strlen(ipa_pm_ctx->clients[i]->name))
  449. if (!strcmp(name, ipa_pm_ctx->clients[i]->name))
  450. return -EEXIST;
  451. }
  452. return n;
  453. }
/**
 * add_client_to_exception_list() - add client to the exception list and
 * update pending if necessary
 * @hdl: index of the IPA client
 *
 * For every exception whose usecase string matches this client's name,
 * decrements the pending count and records the client's handle in the
 * exception's bitmask.
 *
 * Returns: 0 if success, negative otherwise
 */
static int add_client_to_exception_list(u32 hdl)
{
	int i, len = 0;
	struct ipa_pm_exception_list *exception;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	len = strlen(ipa_pm_ctx->clients[hdl]->name);
	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		/*
		 * NOTE(review): strnstr() is bounded to len (the client-name
		 * length) and the match also requires the whole usecase
		 * string to be exactly len long, so this appears to match
		 * only single-client usecases whose string equals the name.
		 * Confirm this is the intended rule for comma-separated
		 * multi-client usecases.
		 */
		if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name,
			len) && (strlen(exception->clients)
			== len)) {
			exception->pending--;
			IPA_PM_DBG("Pending: %d\n",
				exception->pending);
			/* more matches than clients named: bookkeeping bug */
			if (exception->pending < 0) {
				WARN_ON(1);
				exception->pending = 0;
				mutex_unlock(&ipa_pm_ctx->client_mutex);
				return -EPERM;
			}
			exception->bitmask |= (1 << hdl);
		}
	}
	IPA_PM_DBG("%s added to exception list\n",
		ipa_pm_ctx->clients[hdl]->name);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return 0;
}
  489. /**
  490. * remove_client_to_exception_list() - remove client from the exception list and
  491. * update pending if necessary
  492. * @hdl: index of the IPA client
  493. *
  494. * Returns: 0 if success, negative otherwise
  495. */
  496. static int remove_client_from_exception_list(u32 hdl)
  497. {
  498. int i;
  499. struct ipa_pm_exception_list *exception;
  500. for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
  501. exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
  502. if (exception->bitmask & (1 << hdl)) {
  503. exception->pending++;
  504. IPA_PM_DBG("Pending: %d\n",
  505. exception->pending);
  506. exception->bitmask &= ~(1 << hdl);
  507. }
  508. }
  509. IPA_PM_DBG("Client %d removed from exception list\n", hdl);
  510. return 0;
  511. }
/**
 * ipa_pm_init() - initialize IPA PM Components
 * @params: parameters needed to fill exceptions and thresholds
 *
 * Allocates the global PM context, creates the PM workqueue, copies the
 * default thresholds, and parses each exception usecase string (counting
 * comma-separated client names into the exception's pending count).
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_init(struct ipa_pm_init_params *params)
{
	int i, j;
	struct clk_scaling_db *clk_scaling;

	if (params == NULL) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}

	if (params->threshold_size <= 0
		|| params->threshold_size > IPA_PM_THRESHOLD_MAX) {
		IPA_PM_ERR("Invalid threshold size\n");
		return -EINVAL;
	}

	if (params->exception_size < 0
		|| params->exception_size > IPA_PM_EXCEPTION_MAX) {
		IPA_PM_ERR("Invalid exception size\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM initialization started\n");

	if (ipa_pm_ctx != NULL) {
		IPA_PM_ERR("Already initialized\n");
		return -EPERM;
	}

	ipa_pm_ctx = kzalloc(sizeof(*ipa_pm_ctx), GFP_KERNEL);
	if (!ipa_pm_ctx) {
		IPA_PM_ERR(":kzalloc err.\n");
		return -ENOMEM;
	}

	ipa_pm_ctx->wq = create_singlethread_workqueue("ipa_pm_activate");
	if (!ipa_pm_ctx->wq) {
		IPA_PM_ERR("create workqueue failed\n");
		kfree(ipa_pm_ctx);
		return -ENOMEM;
	}

	mutex_init(&ipa_pm_ctx->client_mutex);

	/* Populate and init locks in clk_scaling_db */
	clk_scaling = &ipa_pm_ctx->clk_scaling;
	spin_lock_init(&clk_scaling->lock);
	clk_scaling->threshold_size = params->threshold_size;
	clk_scaling->exception_size = params->exception_size;
	INIT_WORK(&clk_scaling->work, clock_scaling_func);

	for (i = 0; i < params->threshold_size; i++)
		clk_scaling->default_threshold[i] =
			params->default_threshold[i];

	/* Populate exception list */
	for (i = 0; i < params->exception_size; i++) {
		strlcpy(clk_scaling->exception_list[i].clients,
			params->exceptions[i].usecase, IPA_PM_MAX_EX_CL);
		IPA_PM_DBG("Usecase: %s\n", params->exceptions[i].usecase);

		/* Parse the commas to count the size of the clients */
		for (j = 0; j < IPA_PM_MAX_EX_CL &&
			clk_scaling->exception_list[i].clients[j]; j++) {
			if (clk_scaling->exception_list[i].clients[j] == ',')
				clk_scaling->exception_list[i].pending++;
		}

		/* for the first client (one more name than commas) */
		clk_scaling->exception_list[i].pending++;
		IPA_PM_DBG("Pending: %d\n",
			clk_scaling->exception_list[i].pending);

		/* populate the threshold */
		for (j = 0; j < params->threshold_size; j++) {
			clk_scaling->exception_list[i].threshold[j]
				= params->exceptions[i].threshold[j];
		}
	}
	IPA_PM_DBG("initialization success");

	return 0;
}
/**
 * ipa_pm_destroy() - tear down IPA PM and free the global context
 *
 * Returns: 0 on success, -EPERM if PM was never initialized (or was
 * already destroyed)
 */
int ipa_pm_destroy(void)
{
	IPA_PM_DBG("IPA PM destroy started\n");

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("Already destroyed\n");
		return -EPERM;
	}

	destroy_workqueue(ipa_pm_ctx->wq);
	kfree(ipa_pm_ctx);
	ipa_pm_ctx = NULL;

	return 0;
}
  598. /**
  599. * ipa_pm_register() - register an IPA PM client with the PM
  600. * @register_params: params for a client like throughput, callback, etc.
  601. * @hdl: int pointer that will be used as an index to access the client
  602. *
  603. * Returns: 0 on success, negative on failure
  604. *
  605. * Side effects: *hdl is replaced with the client index or -EEXIST if
  606. * client is already registered
  607. */
  608. int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
  609. {
  610. struct ipa_pm_client *client;
  611. int elem;
  612. if (ipa_pm_ctx == NULL) {
  613. IPA_PM_ERR("PM_ctx is null\n");
  614. return -EINVAL;
  615. }
  616. if (params == NULL || hdl == NULL || params->name == NULL) {
  617. IPA_PM_ERR("Invalid Params\n");
  618. return -EINVAL;
  619. }
  620. IPA_PM_DBG("IPA PM registering client\n");
  621. mutex_lock(&ipa_pm_ctx->client_mutex);
  622. elem = find_next_open_array_element(params->name);
  623. *hdl = elem;
  624. if (elem < 0 || elem > IPA_PM_MAX_CLIENTS) {
  625. mutex_unlock(&ipa_pm_ctx->client_mutex);
  626. IPA_PM_ERR("client already registered or full array elem=%d\n",
  627. elem);
  628. return elem;
  629. }
  630. ipa_pm_ctx->clients[*hdl] = kzalloc(sizeof
  631. (struct ipa_pm_client), GFP_KERNEL);
  632. if (!ipa_pm_ctx->clients[*hdl]) {
  633. mutex_unlock(&ipa_pm_ctx->client_mutex);
  634. IPA_PM_ERR(":kzalloc err.\n");
  635. return -ENOMEM;
  636. }
  637. mutex_unlock(&ipa_pm_ctx->client_mutex);
  638. client = ipa_pm_ctx->clients[*hdl];
  639. spin_lock_init(&client->state_lock);
  640. INIT_DELAYED_WORK(&client->deactivate_work,
  641. delayed_deferred_deactivate_work_func);
  642. INIT_WORK(&client->activate_work, activate_work_func);
  643. /* populate fields */
  644. strlcpy(client->name, params->name, IPA_PM_MAX_EX_CL);
  645. client->callback = params->callback;
  646. client->callback_params = params->user_data;
  647. client->group = params->group;
  648. client->hdl = *hdl;
  649. client->skip_clk_vote = params->skip_clk_vote;
  650. client->wlock = wakeup_source_register(NULL, client->name);
  651. if (!client->wlock) {
  652. ipa_pm_deregister(*hdl);
  653. IPA_PM_ERR("IPA wakeup source register failed %s\n",
  654. client->name);
  655. return -ENOMEM;
  656. }
  657. init_completion(&client->complete);
  658. /* add client to exception list */
  659. if (add_client_to_exception_list(*hdl)) {
  660. ipa_pm_deregister(*hdl);
  661. IPA_PM_ERR("Fail to add client to exception_list\n");
  662. return -EPERM;
  663. }
  664. IPA_PM_DBG("IPA PM client registered with handle %d\n", *hdl);
  665. return 0;
  666. }
  667. EXPORT_SYMBOL(ipa_pm_register);
/**
 * ipa_pm_deregister() - deregister IPA client from the PM
 * @hdl: index of the client in the array
 *
 * Waits out any in-flight activate/deactivate, refuses to remove an
 * active client, then unmaps the client's pipes, frees it and updates
 * the exception bookkeeping.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deregister(u32 hdl)
{
	struct ipa_pm_client *client;
	int i;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	if (ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Client is Null\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM deregistering client\n");

	client = ipa_pm_ctx->clients[hdl];
	spin_lock_irqsave(&client->state_lock, flags);
	/* let any pending activate/deactivate finish before deciding */
	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		wait_for_completion(&client->complete);
		spin_lock_irqsave(&client->state_lock, flags);
	}

	if (IPA_PM_STATE_ACTIVE(client->state)) {
		IPA_PM_DBG("Activated clients cannot be deregistered");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EPERM;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	mutex_lock(&ipa_pm_ctx->client_mutex);

	/* nullify pointers in pipe array */
	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
		if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl])
			ipa_pm_ctx->clients_by_pipe[i] = NULL;
	}
	wakeup_source_unregister(client->wlock);
	kfree(client);
	ipa_pm_ctx->clients[hdl] = NULL;

	remove_client_from_exception_list(hdl);
	IPA_PM_DBG("IPA PM client %d deregistered\n", hdl);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return 0;
}
EXPORT_SYMBOL(ipa_pm_deregister);
/**
 * ipa_pm_associate_ipa_cons_to_client() - add mapping to pipe with ipa client
 * @hdl: index of the client to be mapped
 * @consumer: the pipe/consumer name to be mapped to the client
 *
 * Returns: 0 on success, negative on failure
 *
 * Side effects: multiple pipes are allowed to be mapped to a single client
 */
int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
{
	int idx;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 ||
		consumer >= IPA_CLIENT_MAX) {
		IPA_PM_ERR("invalid params\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (ipa_pm_ctx->clients[hdl] == NULL) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("Client is NULL\n");
		return -EPERM;
	}

	idx = ipa_get_ep_mapping(consumer);
	/* consumer without an endpoint on this target: silently succeed */
	if (idx < 0) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_DBG("Pipe is not used\n");
		return 0;
	}

	IPA_PM_DBG("Mapping pipe %d to client %d\n", idx, hdl);

	/* a pipe may map to only one client */
	if (ipa_pm_ctx->clients_by_pipe[idx] != NULL) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("Pipe is already mapped\n");
		return -EPERM;
	}

	ipa_pm_ctx->clients_by_pipe[idx] = ipa_pm_ctx->clients[hdl];
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	IPA_PM_DBG("Pipe %d is mapped to client %d\n", idx, hdl);

	return 0;
}
EXPORT_SYMBOL(ipa_pm_associate_ipa_cons_to_client);
/*
 * ipa_pm_activate_helper() - core of the activate path shared by the sync
 * and async entry points. Takes the clock vote for @client and drives the
 * client state machine.
 * @client: the client to activate (caller has validated it is non-NULL)
 * @sync: true to block until activation completes; false to return
 *        -EINPROGRESS and finish on the workqueue
 *
 * Returns: 0 when activated, -EINPROGRESS when deferred (async path),
 * -EPERM on an invalid state.
 */
static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
{
	struct ipa_active_client_logging_info log_info;
	int result = 0;
	unsigned long flags;

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
		if (sync) {
			/* blocking wait; the spinlock must be dropped */
			spin_unlock_irqrestore(&client->state_lock, flags);
			wait_for_completion(&client->complete);
			spin_lock_irqsave(&client->state_lock, flags);
		} else {
			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
			spin_unlock_irqrestore(&client->state_lock, flags);
			return -EINPROGRESS;
		}
	}

	switch (client->state) {
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_ACTIVATED_TIMER_SET;
		/* fall through - clock vote is already held */
	case IPA_PM_ACTIVATED:
	case IPA_PM_ACTIVATED_TIMER_SET:
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	case IPA_PM_DEACTIVATED:
		/* only state that proceeds to take a new clock vote */
		break;
	default:
		IPA_PM_ERR("Invalid State\n");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EPERM;
	}
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, client->name);
	if (!client->skip_clk_vote) {
		if (sync) {
			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
			/* blocking clock vote; cannot hold the spinlock */
			spin_unlock_irqrestore(&client->state_lock, flags);
			IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
			spin_lock_irqsave(&client->state_lock, flags);
		} else
			result = ipa3_inc_client_enable_clks_no_block
				(&log_info);
	}

	/* we got the clocks */
	if (result == 0) {
		client->state = IPA_PM_ACTIVATED;
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_stay_awake(client->wlock);
		spin_unlock_irqrestore(&client->state_lock, flags);
		activate_client(client->hdl);
		if (sync)
			do_clk_scaling();
		else
			queue_work(ipa_pm_ctx->wq,
				&ipa_pm_ctx->clk_scaling.work);
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		return 0;
	}

	/* non-blocking vote failed; finish activation on the workqueue */
	client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
	reinit_completion(&client->complete);
	queue_work(ipa_pm_ctx->wq, &client->activate_work);
	spin_unlock_irqrestore(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	return -EINPROGRESS;
}
  832. /**
  833. * ipa_pm_activate(): activate ipa client to vote for clock(). Can be called
  834. * from atomic context and returns -EINPROGRESS if cannot be done synchronously
  835. * @hdl: index of the client in the array
  836. *
  837. * Returns: 0 on success, -EINPROGRESS if operation cannot be done synchronously
  838. * and other negatives on failure
  839. */
  840. int ipa_pm_activate(u32 hdl)
  841. {
  842. if (ipa_pm_ctx == NULL) {
  843. IPA_PM_ERR("PM_ctx is null\n");
  844. return -EINVAL;
  845. }
  846. if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
  847. IPA_PM_ERR("Invalid Param\n");
  848. return -EINVAL;
  849. }
  850. return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], false);
  851. }
  852. EXPORT_SYMBOL(ipa_pm_activate);
  853. /**
 * ipa_pm_activate_sync(): activate ipa client to vote for clock synchronously.
 * Cannot be called from an atomic context.
  856. * @hdl: index of the client in the array
  857. *
  858. * Returns: 0 on success, negative on failure
  859. */
  860. int ipa_pm_activate_sync(u32 hdl)
  861. {
  862. if (ipa_pm_ctx == NULL) {
  863. IPA_PM_ERR("PM_ctx is null\n");
  864. return -EINVAL;
  865. }
  866. if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
  867. IPA_PM_ERR("Invalid Param\n");
  868. return -EINVAL;
  869. }
  870. return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], true);
  871. }
  872. EXPORT_SYMBOL(ipa_pm_activate_sync);
  873. /**
  874. * ipa_pm_deferred_deactivate(): schedule a timer to deactivate client and
  875. * devote clock. Can be called from atomic context (asynchronously)
  876. * @hdl: index of the client in the array
  877. *
  878. * Returns: 0 on success, negative on failure
  879. */
int ipa_pm_deferred_deactivate(u32 hdl)
{
	struct ipa_pm_client *client;
	unsigned long flags;
	unsigned long delay;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	client = ipa_pm_ctx->clients[hdl];
	IPA_PM_DBG_STATE(hdl, client->name, client->state);

	spin_lock_irqsave(&client->state_lock, flags);
	switch (client->state) {
	case IPA_PM_ACTIVATE_IN_PROGRESS:
		/* turn the pending activation into a deactivation */
		client->state = IPA_PM_DEACTIVATE_IN_PROGRESS;
		/* fall through - nothing more to schedule */
	case IPA_PM_DEACTIVATED:
		IPA_PM_DBG_STATE(hdl, client->name, client->state);
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	case IPA_PM_ACTIVATED:
		delay = IPA_PM_DEFERRED_TIMEOUT;
		/* virtual/emulation targets run slower; stretch the timeout */
		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
			delay *= 5;

		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
			msecs_to_jiffies(delay));
		break;
	case IPA_PM_ACTIVATED_TIMER_SET:
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		/* a timer is already queued; ask its handler to reschedule */
		client->state = IPA_PM_ACTIVATED_PENDING_RESCHEDULE;
		/* fall through */
	case IPA_PM_DEACTIVATE_IN_PROGRESS:
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
		break;
	case IPA_PM_STATE_MAX:
	default:
		IPA_PM_ERR("Bad State");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EINVAL;
	}
	IPA_PM_DBG_STATE(hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
	return 0;
}
EXPORT_SYMBOL(ipa_pm_deferred_deactivate);
  929. /**
  930. * ipa_pm_deactivate_all_deferred(): Cancel the deferred deactivation timer and
  931. * immediately devotes for IPA clocks
  932. *
  933. * Returns: 0 on success, negative on failure
  934. */
int ipa_pm_deactivate_all_deferred(void)
{
	int i;
	bool run_algorithm = false;
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	/* slot 0 is skipped, same as in ipa_pm_stat() - presumably
	 * reserved; TODO confirm against client registration
	 */
	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];

		if (client == NULL)
			continue;

		/* stop any pending deferred deactivation timer first */
		cancel_delayed_work_sync(&client->deactivate_work);

		/* NOTE(review): state is read here without state_lock;
		 * the in-progress case is settled via the completion -
		 * verify the work handlers signal it on every path
		 */
		if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
			wait_for_completion(&client->complete);
			continue;
		}

		spin_lock_irqsave(&client->state_lock, flags);
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

		if (client->state == IPA_PM_ACTIVATED_TIMER_SET) {
			/* timer was cancelled above; keep the clock vote */
			client->state = IPA_PM_ACTIVATED;
			IPA_PM_DBG_STATE(client->hdl, client->name,
				client->state);
			spin_unlock_irqrestore(&client->state_lock, flags);
		} else if (client->state ==
			IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
			client->state ==
			IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
			run_algorithm = true;
			client->state = IPA_PM_DEACTIVATED;
			IPA_PM_DBG_STATE(client->hdl, client->name,
				client->state);
			spin_unlock_irqrestore(&client->state_lock, flags);
			/* devote the clock immediately instead of waiting */
			if (!client->skip_clk_vote) {
				IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
				if (client->group == IPA_PM_GROUP_APPS)
					__pm_relax(client->wlock);
			}
			deactivate_client(client->hdl);
		} else /* if activated or deactivated, we do nothing */
			spin_unlock_irqrestore(&client->state_lock, flags);
	}

	/* rescale only if at least one vote was actually dropped */
	if (run_algorithm)
		do_clk_scaling();

	return 0;
}
  983. /**
  984. * ipa_pm_deactivate_sync(): deactivate ipa client and devote clock. Cannot be
  985. * called from atomic context.
  986. * @hdl: index of the client in the array
  987. *
  988. * Returns: 0 on success, negative on failure
  989. */
int ipa_pm_deactivate_sync(u32 hdl)
{
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}
	client = ipa_pm_ctx->clients[hdl];

	/* make sure no deferred deactivation fires after we return */
	cancel_delayed_work_sync(&client->deactivate_work);

	/* let any in-flight activate/deactivate settle before deciding */
	if (IPA_PM_STATE_IN_PROGRESS(client->state))
		wait_for_completion(&client->complete);

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(hdl, client->name, client->state);

	if (client->state == IPA_PM_DEACTIVATED) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&client->state_lock, flags);

	/* else case (Deactivates all Activated cases)*/
	if (!client->skip_clk_vote) {
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_relax(client->wlock);
	}

	spin_lock_irqsave(&client->state_lock, flags);
	client->state = IPA_PM_DEACTIVATED;
	IPA_PM_DBG_STATE(hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
	deactivate_client(hdl);
	do_clk_scaling();

	return 0;
}
EXPORT_SYMBOL(ipa_pm_deactivate_sync);
  1028. /**
  1029. * ipa_pm_handle_suspend(): calls the callbacks of suspended clients to wake up
  1030. * @pipe_bitmask: the bits represent the indexes of the clients to be woken up
  1031. *
  1032. * Returns: 0 on success, negative on failure
  1033. */
  1034. int ipa_pm_handle_suspend(u32 pipe_bitmask)
  1035. {
  1036. int i;
  1037. struct ipa_pm_client *client;
  1038. bool client_notified[IPA_PM_MAX_CLIENTS] = { false };
  1039. if (ipa_pm_ctx == NULL) {
  1040. IPA_PM_ERR("PM_ctx is null\n");
  1041. return -EINVAL;
  1042. }
  1043. IPA_PM_DBG_LOW("bitmask: %d", pipe_bitmask);
  1044. if (pipe_bitmask == 0)
  1045. return 0;
  1046. mutex_lock(&ipa_pm_ctx->client_mutex);
  1047. for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
  1048. if (pipe_bitmask & (1 << i)) {
  1049. client = ipa_pm_ctx->clients_by_pipe[i];
  1050. if (client && !client_notified[client->hdl]) {
  1051. if (client->callback) {
  1052. client->callback(client->callback_params
  1053. , IPA_PM_REQUEST_WAKEUP);
  1054. client_notified[client->hdl] = true;
  1055. } else {
  1056. IPA_PM_ERR("client has no callback");
  1057. WARN_ON(1);
  1058. }
  1059. }
  1060. }
  1061. }
  1062. mutex_unlock(&ipa_pm_ctx->client_mutex);
  1063. return 0;
  1064. }
  1065. /**
  1066. * ipa_pm_set_throughput(): Adds/changes the throughput requirement to IPA PM
  1067. * to be used for clock scaling
  1068. * @hdl: index of the client in the array
  1069. * @throughput: the new throughput value to be set for that client
  1070. *
  1071. * Returns: 0 on success, negative on failure
  1072. */
  1073. int ipa_pm_set_throughput(u32 hdl, int throughput)
  1074. {
  1075. struct ipa_pm_client *client;
  1076. unsigned long flags;
  1077. if (ipa_pm_ctx == NULL) {
  1078. IPA_PM_ERR("PM_ctx is null\n");
  1079. return -EINVAL;
  1080. }
  1081. if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL
  1082. || throughput < 0) {
  1083. IPA_PM_ERR("Invalid Params\n");
  1084. return -EINVAL;
  1085. }
  1086. client = ipa_pm_ctx->clients[hdl];
  1087. mutex_lock(&ipa_pm_ctx->client_mutex);
  1088. if (client->group == IPA_PM_GROUP_DEFAULT)
  1089. IPA_PM_DBG_LOW("Old throughput: %d\n", client->throughput);
  1090. else
  1091. IPA_PM_DBG_LOW("old Group %d throughput: %d\n",
  1092. client->group, ipa_pm_ctx->group_tput[client->group]);
  1093. if (client->group == IPA_PM_GROUP_DEFAULT)
  1094. client->throughput = throughput;
  1095. else
  1096. ipa_pm_ctx->group_tput[client->group] = throughput;
  1097. if (client->group == IPA_PM_GROUP_DEFAULT)
  1098. IPA_PM_DBG_LOW("New throughput: %d\n", client->throughput);
  1099. else
  1100. IPA_PM_DBG_LOW("New Group %d throughput: %d\n",
  1101. client->group, ipa_pm_ctx->group_tput[client->group]);
  1102. mutex_unlock(&ipa_pm_ctx->client_mutex);
  1103. spin_lock_irqsave(&client->state_lock, flags);
  1104. if (IPA_PM_STATE_ACTIVE(client->state) || (client->group !=
  1105. IPA_PM_GROUP_DEFAULT)) {
  1106. spin_unlock_irqrestore(&client->state_lock, flags);
  1107. do_clk_scaling();
  1108. return 0;
  1109. }
  1110. spin_unlock_irqrestore(&client->state_lock, flags);
  1111. return 0;
  1112. }
  1113. EXPORT_SYMBOL(ipa_pm_set_throughput);
  1114. void ipa_pm_set_clock_index(int index)
  1115. {
  1116. if (ipa_pm_ctx && index >= 0)
  1117. ipa_pm_ctx->clk_scaling.cur_vote = index;
  1118. IPA_PM_DBG("Setting pm clock vote to %d\n", index);
  1119. }
  1120. /**
  1121. * ipa_pm_stat() - print PM stat
  1122. * @buf: [in] The user buff used to print
  1123. * @size: [in] The size of buf
  1124. * Returns: number of bytes used on success, negative on failure
  1125. *
  1126. * This function is called by ipa_debugfs in order to receive
  1127. * a picture of the clients in the PM and the throughput, threshold and cur vote
  1128. */
  1129. int ipa_pm_stat(char *buf, int size)
  1130. {
  1131. struct ipa_pm_client *client;
  1132. struct clk_scaling_db *clk = &ipa_pm_ctx->clk_scaling;
  1133. int i, j, tput, cnt = 0, result = 0;
  1134. unsigned long flags;
  1135. if (!buf || size < 0)
  1136. return -EINVAL;
  1137. mutex_lock(&ipa_pm_ctx->client_mutex);
  1138. result = scnprintf(buf + cnt, size - cnt, "\n\nCurrent threshold: [");
  1139. cnt += result;
  1140. for (i = 0; i < clk->threshold_size; i++) {
  1141. result = scnprintf(buf + cnt, size - cnt,
  1142. "%d, ", clk->current_threshold[i]);
  1143. cnt += result;
  1144. }
  1145. result = scnprintf(buf + cnt, size - cnt, "\b\b]\n");
  1146. cnt += result;
  1147. result = scnprintf(buf + cnt, size - cnt,
  1148. "Aggregated tput: %d, Cur vote: %d",
  1149. ipa_pm_ctx->aggregated_tput, clk->cur_vote);
  1150. cnt += result;
  1151. result = scnprintf(buf + cnt, size - cnt, "\n\nRegistered Clients:\n");
  1152. cnt += result;
  1153. for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
  1154. client = ipa_pm_ctx->clients[i];
  1155. if (client == NULL)
  1156. continue;
  1157. spin_lock_irqsave(&client->state_lock, flags);
  1158. if (client->group == IPA_PM_GROUP_DEFAULT)
  1159. tput = client->throughput;
  1160. else
  1161. tput = ipa_pm_ctx->group_tput[client->group];
  1162. result = scnprintf(buf + cnt, size - cnt,
  1163. "Client[%d]: %s State:%s\nGroup: %s Throughput: %d Pipes: ",
  1164. i, client->name, client_state_to_str[client->state],
  1165. ipa_pm_group_to_str[client->group], tput);
  1166. cnt += result;
  1167. for (j = 0; j < IPA3_MAX_NUM_PIPES; j++) {
  1168. if (ipa_pm_ctx->clients_by_pipe[j] == client) {
  1169. result = scnprintf(buf + cnt, size - cnt,
  1170. "%d, ", j);
  1171. cnt += result;
  1172. }
  1173. }
  1174. result = scnprintf(buf + cnt, size - cnt, "\b\b\n\n");
  1175. cnt += result;
  1176. spin_unlock_irqrestore(&client->state_lock, flags);
  1177. }
  1178. mutex_unlock(&ipa_pm_ctx->client_mutex);
  1179. return cnt;
  1180. }
  1181. /**
  1182. * ipa_pm_exceptions_stat() - print PM exceptions stat
  1183. * @buf: [in] The user buff used to print
  1184. * @size: [in] The size of buf
  1185. * Returns: number of bytes used on success, negative on failure
  1186. *
  1187. * This function is called by ipa_debugfs in order to receive
  1188. * a full picture of the exceptions in the PM
  1189. */
  1190. int ipa_pm_exceptions_stat(char *buf, int size)
  1191. {
  1192. int i, j, cnt = 0, result = 0;
  1193. struct ipa_pm_exception_list *exception;
  1194. if (!buf || size < 0)
  1195. return -EINVAL;
  1196. result = scnprintf(buf + cnt, size - cnt, "\n");
  1197. cnt += result;
  1198. mutex_lock(&ipa_pm_ctx->client_mutex);
  1199. for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
  1200. exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
  1201. if (exception == NULL) {
  1202. result = scnprintf(buf + cnt, size - cnt,
  1203. "Exception %d is NULL\n\n", i);
  1204. cnt += result;
  1205. continue;
  1206. }
  1207. result = scnprintf(buf + cnt, size - cnt,
  1208. "Exception %d: %s\nPending: %d Bitmask: %d Threshold: ["
  1209. , i, exception->clients, exception->pending,
  1210. exception->bitmask);
  1211. cnt += result;
  1212. for (j = 0; j < ipa_pm_ctx->clk_scaling.threshold_size; j++) {
  1213. result = scnprintf(buf + cnt, size - cnt,
  1214. "%d, ", exception->threshold[j]);
  1215. cnt += result;
  1216. }
  1217. result = scnprintf(buf + cnt, size - cnt, "\b\b]\n\n");
  1218. cnt += result;
  1219. }
  1220. mutex_unlock(&ipa_pm_ctx->client_mutex);
  1221. return cnt;
  1222. }