ipa_pm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include "ipa_pm.h"
#include "ipa_i.h"

#define IPA_PM_DRV_NAME "ipa_pm"

#define IPA_PM_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_PM_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_PM_ERR(fmt, args...) \
	do { \
		pr_err(IPA_PM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_PM_DBG_STATE(hdl, name, state) \
	IPA_PM_DBG_LOW("Client[%d] %s: %s\n", hdl, name, \
		client_state_to_str[state])

#if IPA_PM_MAX_CLIENTS > 32
#error IPA_PM_MAX_CLIENTS is greater than 32; all bitmask types must be widened
#endif
/*
 * struct ipa_pm_exception_list - holds information about an exception
 * @clients: comma-separated string of the client names in the exception
 * @pending: number of clients in the exception that have not yet registered
 * @bitmask: bitmask of the clients in the exception based on handle
 * @threshold: the threshold values for the exception
 */
struct ipa_pm_exception_list {
	char clients[IPA_PM_MAX_EX_CL];
	int pending;
	u32 bitmask;
	int threshold[IPA_PM_THRESHOLD_MAX];
};

/*
 * struct clk_scaling_db - holds information about thresholds and exceptions
 * @lock: lock for the bitmasks and thresholds
 * @exception_list: the list of exceptions
 * @work: work for the clock scaling algorithm
 * @active_client_bitmask: the bits represent the handles of clients that are
 *			   currently activated
 * @threshold_size: size of the throughput threshold
 * @exception_size: size of the exception list
 * @cur_vote: idx of the threshold
 * @default_threshold: the thresholds used if no exception passes
 * @current_threshold: the current threshold of the clock plan
 */
struct clk_scaling_db {
	spinlock_t lock;
	struct ipa_pm_exception_list exception_list[IPA_PM_EXCEPTION_MAX];
	struct work_struct work;
	u32 active_client_bitmask;
	int threshold_size;
	int exception_size;
	int cur_vote;
	int default_threshold[IPA_PM_THRESHOLD_MAX];
	int *current_threshold;
};
/*
 * ipa_pm state names
 *
 * Timer free states:
 * @IPA_PM_DEACTIVATED: client starting state when registered
 * @IPA_PM_DEACTIVATE_IN_PROGRESS: deactivate was called while the client was
 *	still activating
 * @IPA_PM_ACTIVATE_IN_PROGRESS: client is being activated by work_queue
 * @IPA_PM_ACTIVATED: client is activated without any timers
 *
 * Timer set states:
 * @IPA_PM_ACTIVATED_PENDING_DEACTIVATION: moves to deactivate once the timer
 *	expires
 * @IPA_PM_ACTIVATED_TIMER_SET: client was activated while the timer was set,
 *	so when the timer expires, the client will still be activated
 * @IPA_PM_ACTIVATED_PENDING_RESCHEDULE: state signifying an extended timer
 *	when a client is deferred_deactivated while a timer is still active
 */
enum ipa_pm_state {
	IPA_PM_DEACTIVATED,
	IPA_PM_DEACTIVATE_IN_PROGRESS,
	IPA_PM_ACTIVATE_IN_PROGRESS,
	IPA_PM_ACTIVATED,
	IPA_PM_ACTIVATED_PENDING_DEACTIVATION,
	IPA_PM_ACTIVATED_TIMER_SET,
	IPA_PM_ACTIVATED_PENDING_RESCHEDULE,
	IPA_PM_STATE_MAX
};

#define IPA_PM_STATE_ACTIVE(state) \
	(state == IPA_PM_ACTIVATED ||\
		state == IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||\
		state == IPA_PM_ACTIVATED_TIMER_SET ||\
		state == IPA_PM_ACTIVATED_PENDING_RESCHEDULE)

#define IPA_PM_STATE_IN_PROGRESS(state) \
	(state == IPA_PM_ACTIVATE_IN_PROGRESS \
		|| state == IPA_PM_DEACTIVATE_IN_PROGRESS)
/*
 * struct ipa_pm_client - holds information about a specific IPA client
 * @name: string name of the client
 * @callback: pointer to the client's callback function
 * @callback_params: pointer to the client's callback parameters
 * @state: Activation state of the client
 * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
 * @group: the ipa_pm_group the client belongs to
 * @hdl: handle of the client
 * @throughput: the throughput of the client for clock scaling
 * @state_lock: spinlock to lock the pm_states
 * @activate_work: work for activate (blocking case)
 * @deactivate_work: delayed work for the deferred_deactivate function
 * @complete: generic wait-for-completion handler
 * @wlock: wake source to prevent AP suspend
 */
struct ipa_pm_client {
	char name[IPA_PM_MAX_EX_CL];
	void (*callback)(void *user_data, enum ipa_pm_cb_event);
	void *callback_params;
	enum ipa_pm_state state;
	bool skip_clk_vote;
	int group;
	int hdl;
	int throughput;
	spinlock_t state_lock;
	struct work_struct activate_work;
	struct delayed_work deactivate_work;
	struct completion complete;
	struct wakeup_source wlock;
};

/*
 * struct ipa_pm_ctx - global ctx that will hold the client arrays and tput info
 * @clients: array of the clients with the handle as its index
 * @clients_by_pipe: array of the clients with the endpoint as the index
 * @wq: work queue for deferred deactivate, activate, and clk_scaling work
 * @clk_scaling: clock scaling database
 * @client_mutex: global mutex to lock the client arrays
 * @aggregated_tput: aggregated tput value of all valid activated clients
 * @group_tput: combined throughput for the groups
 */
struct ipa_pm_ctx {
	struct ipa_pm_client *clients[IPA_PM_MAX_CLIENTS];
	struct ipa_pm_client *clients_by_pipe[IPA3_MAX_NUM_PIPES];
	struct workqueue_struct *wq;
	struct clk_scaling_db clk_scaling;
	struct mutex client_mutex;
	int aggregated_tput;
	int group_tput[IPA_PM_GROUP_MAX];
};

static struct ipa_pm_ctx *ipa_pm_ctx;

static const char *client_state_to_str[IPA_PM_STATE_MAX] = {
	__stringify(IPA_PM_DEACTIVATED),
	__stringify(IPA_PM_DEACTIVATE_IN_PROGRESS),
	__stringify(IPA_PM_ACTIVATE_IN_PROGRESS),
	__stringify(IPA_PM_ACTIVATED),
	__stringify(IPA_PM_ACTIVATED_PENDING_DEACTIVATION),
	__stringify(IPA_PM_ACTIVATED_TIMER_SET),
	__stringify(IPA_PM_ACTIVATED_PENDING_RESCHEDULE),
};

static const char *ipa_pm_group_to_str[IPA_PM_GROUP_MAX] = {
	__stringify(IPA_PM_GROUP_DEFAULT),
	__stringify(IPA_PM_GROUP_APPS),
	__stringify(IPA_PM_GROUP_MODEM),
};
/**
 * pop_max_from_array() - pop the max and move the last element to where the
 *	max was popped
 * @arr: array to be searched for max
 * @n: pointer to the size of the array; decremented by one when the max is
 *	popped
 *
 * Returns: max value of the array
 */
static int pop_max_from_array(int *arr, int *n)
{
	int i;
	int max, max_idx;

	max_idx = *n - 1;
	max = 0;

	if (*n == 0)
		return 0;

	for (i = 0; i < *n; i++) {
		if (arr[i] > max) {
			max = arr[i];
			max_idx = i;
		}
	}
	(*n)--;
	arr[max_idx] = arr[*n];

	return max;
}
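
/*
 * Example (illustrative values): for arr = {3, 9, 5} and *n == 3,
 * pop_max_from_array() returns 9, moves the last element into the freed slot
 * so that arr becomes {3, 5}, and leaves *n == 2.
 */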
/**
 * calculate_throughput() - calculate the aggregated throughput
 * based on active clients
 *
 * Returns: aggregated tput value
 */
static int calculate_throughput(void)
{
	int client_tput[IPA_PM_MAX_CLIENTS] = { 0 };
	bool group_voted[IPA_PM_GROUP_MAX] = { false };
	int i, n;
	int max, second_max, aggregated_tput;
	struct ipa_pm_client *client;

	/* Create a basic array to hold throughputs */
	for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];
		if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
			/* default case */
			if (client->group == IPA_PM_GROUP_DEFAULT) {
				client_tput[n++] = client->throughput;
			} else if (!group_voted[client->group]) {
				client_tput[n++] = ipa_pm_ctx->group_tput
					[client->group];
				group_voted[client->group] = true;
			}
		}
	}
	/* the array only uses the first n slots; n is the number of entries */

	aggregated_tput = 0;

	/**
	 * throughput algorithm:
	 * 1) pop the max and second_max
	 * 2) add the 2nd max to aggregated tput
	 * 3) insert the value of max - 2nd max
	 * 4) repeat until array is of size 1
	 * (a worked trace follows this function)
	 */
	while (n > 1) {
		max = pop_max_from_array(client_tput, &n);
		second_max = pop_max_from_array(client_tput, &n);
		client_tput[n++] = max - second_max;
		aggregated_tput += second_max;
	}

	IPA_PM_DBG_LOW("Aggregated throughput: %d\n", aggregated_tput);

	return aggregated_tput;
}
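
/*
 * Worked trace of the throughput algorithm above (hypothetical numbers,
 * assuming three active IPA_PM_GROUP_DEFAULT clients with throughputs
 * 1000, 700 and 500):
 *   iteration 1: pop 1000 and 700, add 700 to the aggregate, push back 300
 *   iteration 2: pop 500 and 300, add 300 to the aggregate, push back 200
 * One element remains, so the loop stops and the aggregated tput is 1000.
 */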
/**
 * deactivate_client() - turn off the bit in the active client bitmask based on
 * the handle passed in
 * @hdl: The index of the client to be deactivated
 */
static void deactivate_client(u32 hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
	ipa_pm_ctx->clk_scaling.active_client_bitmask &= ~(1 << hdl);
	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
	IPA_PM_DBG_LOW("active bitmask: %x\n",
		ipa_pm_ctx->clk_scaling.active_client_bitmask);
}

/**
 * activate_client() - turn on the bit in the active client bitmask based on
 * the handle passed in
 * @hdl: The index of the client to be activated
 */
static void activate_client(u32 hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
	ipa_pm_ctx->clk_scaling.active_client_bitmask |= (1 << hdl);
	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
	IPA_PM_DBG_LOW("active bitmask: %x\n",
		ipa_pm_ctx->clk_scaling.active_client_bitmask);
}

/**
 * set_current_threshold() - set the current threshold to the threshold of the
 * first exception that passes, or to the default threshold if none pass
 */
static void set_current_threshold(void)
{
	int i;
	struct clk_scaling_db *clk;
	struct ipa_pm_exception_list *exception;
	unsigned long flags;

	clk = &ipa_pm_ctx->clk_scaling;

	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
	for (i = 0; i < clk->exception_size; i++) {
		exception = &clk->exception_list[i];
		if (exception->pending == 0 && (exception->bitmask
			& ~clk->active_client_bitmask) == 0) {
			spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock,
				flags);
			clk->current_threshold = exception->threshold;
			IPA_PM_DBG("Exception %d set\n", i);
			return;
		}
	}
	clk->current_threshold = clk->default_threshold;
	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
}
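
/*
 * Illustrative example of the exception matching above (hypothetical
 * handles): an exception covering the clients with handles 1 and 2 has
 * bitmask 0x6. With pending == 0 and active_client_bitmask == 0xe (handles
 * 1, 2 and 3 active), 0x6 & ~0xe == 0, so that exception's thresholds are
 * used. If the client with handle 2 deactivates (active bitmask 0xa),
 * 0x6 & ~0xa == 0x4 != 0 and the default thresholds are used instead.
 */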
/**
 * do_clk_scaling() - set the clock based on the activated clients
 *
 * Returns: 0 if success, negative otherwise
 */
static int do_clk_scaling(void)
{
	int i, tput;
	int new_th_idx = 1;
	struct clk_scaling_db *clk_scaling;

	if (atomic_read(&ipa3_ctx->ipa_clk_vote) == 0) {
		IPA_PM_DBG("IPA clock is gated\n");
		return 0;
	}

	clk_scaling = &ipa_pm_ctx->clk_scaling;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	IPA_PM_DBG_LOW("clock scaling started\n");
	tput = calculate_throughput();
	ipa_pm_ctx->aggregated_tput = tput;
	set_current_threshold();

	mutex_unlock(&ipa_pm_ctx->client_mutex);

	for (i = 0; i < clk_scaling->threshold_size; i++) {
		if (tput >= clk_scaling->current_threshold[i])
			new_th_idx++;
	}

	IPA_PM_DBG_LOW("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);

	if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) {
		ipa_pm_ctx->clk_scaling.cur_vote = new_th_idx;
		ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote);
	}

	IPA_PM_DBG_LOW("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);

	return 0;
}
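
/*
 * Example of the vote calculation above (hypothetical thresholds): with
 * current_threshold = {600, 1200} and an aggregated tput of 1000, new_th_idx
 * starts at 1, is bumped to 2 because 1000 >= 600, and stays at 2 because
 * 1000 < 1200; ipa3_set_clock_plan_from_pm(2) is then called only if that
 * differs from the previous vote.
 */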
/**
 * clock_scaling_func() - set the clock on a work queue
 */
static void clock_scaling_func(struct work_struct *work)
{
	do_clk_scaling();
}

/**
 * activate_work_func - activate a client and vote for clock on a work queue
 */
static void activate_work_func(struct work_struct *work)
{
	struct ipa_pm_client *client;
	bool dec_clk = false;
	unsigned long flags;

	client = container_of(work, struct ipa_pm_client, activate_work);
	if (!client->skip_clk_vote) {
		IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_stay_awake(&client->wlock);
	}

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	if (client->state == IPA_PM_ACTIVATE_IN_PROGRESS) {
		client->state = IPA_PM_ACTIVATED;
	} else if (client->state == IPA_PM_DEACTIVATE_IN_PROGRESS) {
		client->state = IPA_PM_DEACTIVATED;
		dec_clk = true;
	} else {
		IPA_PM_ERR("unexpected state %d\n", client->state);
		WARN_ON(1);
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	complete_all(&client->complete);

	if (dec_clk) {
		if (!client->skip_clk_vote) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
			if (client->group == IPA_PM_GROUP_APPS)
				__pm_relax(&client->wlock);
		}

		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		return;
	}

	activate_client(client->hdl);

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (client->callback) {
		client->callback(client->callback_params,
			IPA_PM_CLIENT_ACTIVATED);
	} else {
		IPA_PM_ERR("client has no callback");
		WARN_ON(1);
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	do_clk_scaling();
}
/**
 * delayed_deferred_deactivate_work_func - deferred deactivate on a work queue
 */
static void delayed_deferred_deactivate_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_pm_client *client;
	unsigned long flags;
	unsigned long delay;

	dwork = container_of(work, struct delayed_work, work);
	client = container_of(dwork, struct ipa_pm_client, deactivate_work);

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	switch (client->state) {
	case IPA_PM_ACTIVATED_TIMER_SET:
		client->state = IPA_PM_ACTIVATED;
		goto bail;
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
		delay = IPA_PM_DEFERRED_TIMEOUT;
		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
			delay *= 5;

		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
			msecs_to_jiffies(delay));
		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
		goto bail;
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_DEACTIVATED;
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		spin_unlock_irqrestore(&client->state_lock, flags);
		if (!client->skip_clk_vote) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
			if (client->group == IPA_PM_GROUP_APPS)
				__pm_relax(&client->wlock);
		}

		deactivate_client(client->hdl);
		do_clk_scaling();
		return;
	default:
		IPA_PM_ERR("unexpected state %d\n", client->state);
		WARN_ON(1);
		goto bail;
	}

bail:
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
}
static int find_next_open_array_element(const char *name)
{
	int i, n;

	n = -ENOBUFS;

	/* 0 is not a valid handle */
	for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) {
		if (ipa_pm_ctx->clients[i] == NULL) {
			n = i;
			continue;
		}
		if (strlen(name) == strlen(ipa_pm_ctx->clients[i]->name))
			if (!strcmp(name, ipa_pm_ctx->clients[i]->name))
				return -EEXIST;
	}
	return n;
}
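
/*
 * Because the loop above scans from the top of the array down to handle 1
 * (handle 0 is reserved), the lowest free handle wins. Example (hypothetical
 * state): if only clients[1] and clients[2] are occupied, a new name gets
 * handle 3; re-registering an existing name returns -EEXIST; a full array
 * returns -ENOBUFS.
 */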
/**
 * add_client_to_exception_list() - add client to the exception list and
 * update pending if necessary
 * @hdl: index of the IPA client
 *
 * Returns: 0 if success, negative otherwise
 */
static int add_client_to_exception_list(u32 hdl)
{
	int i, len = 0;
	struct ipa_pm_exception_list *exception;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	len = strlen(ipa_pm_ctx->clients[hdl]->name);
	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name,
			len) && (strlen(exception->clients)
			== len)) {
			exception->pending--;
			IPA_PM_DBG("Pending: %d\n",
				exception->pending);

			if (exception->pending < 0) {
				WARN_ON(1);
				exception->pending = 0;
				mutex_unlock(&ipa_pm_ctx->client_mutex);
				return -EPERM;
			}
			exception->bitmask |= (1 << hdl);
		}
	}
	IPA_PM_DBG("%s added to exception list\n",
		ipa_pm_ctx->clients[hdl]->name);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return 0;
}

/**
 * remove_client_from_exception_list() - remove client from the exception list
 * and update pending if necessary
 * @hdl: index of the IPA client
 *
 * Returns: 0 if success, negative otherwise
 */
static int remove_client_from_exception_list(u32 hdl)
{
	int i;
	struct ipa_pm_exception_list *exception;

	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		if (exception->bitmask & (1 << hdl)) {
			exception->pending++;
			IPA_PM_DBG("Pending: %d\n",
				exception->pending);
			exception->bitmask &= ~(1 << hdl);
		}
	}
	IPA_PM_DBG("Client %d removed from exception list\n", hdl);

	return 0;
}
/**
 * ipa_pm_init() - initialize IPA PM Components
 * @params: parameters needed to fill exceptions and thresholds
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_init(struct ipa_pm_init_params *params)
{
	int i, j;
	struct clk_scaling_db *clk_scaling;

	if (params == NULL) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}

	if (params->threshold_size <= 0
		|| params->threshold_size > IPA_PM_THRESHOLD_MAX) {
		IPA_PM_ERR("Invalid threshold size\n");
		return -EINVAL;
	}

	if (params->exception_size < 0
		|| params->exception_size > IPA_PM_EXCEPTION_MAX) {
		IPA_PM_ERR("Invalid exception size\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM initialization started\n");

	if (ipa_pm_ctx != NULL) {
		IPA_PM_ERR("Already initialized\n");
		return -EPERM;
	}

	ipa_pm_ctx = kzalloc(sizeof(*ipa_pm_ctx), GFP_KERNEL);
	if (!ipa_pm_ctx) {
		IPA_PM_ERR(":kzalloc err.\n");
		return -ENOMEM;
	}

	ipa_pm_ctx->wq = create_singlethread_workqueue("ipa_pm_activate");
	if (!ipa_pm_ctx->wq) {
		IPA_PM_ERR("create workqueue failed\n");
		kfree(ipa_pm_ctx);
		return -ENOMEM;
	}

	mutex_init(&ipa_pm_ctx->client_mutex);

	/* Populate and init locks in clk_scaling_db */
	clk_scaling = &ipa_pm_ctx->clk_scaling;
	spin_lock_init(&clk_scaling->lock);
	clk_scaling->threshold_size = params->threshold_size;
	clk_scaling->exception_size = params->exception_size;
	INIT_WORK(&clk_scaling->work, clock_scaling_func);

	for (i = 0; i < params->threshold_size; i++)
		clk_scaling->default_threshold[i] =
			params->default_threshold[i];

	/* Populate exception list */
	for (i = 0; i < params->exception_size; i++) {
		strlcpy(clk_scaling->exception_list[i].clients,
			params->exceptions[i].usecase, IPA_PM_MAX_EX_CL);
		IPA_PM_DBG("Usecase: %s\n", params->exceptions[i].usecase);

		/* Parse the commas to count the size of the clients */
		for (j = 0; j < IPA_PM_MAX_EX_CL &&
			clk_scaling->exception_list[i].clients[j]; j++) {
			if (clk_scaling->exception_list[i].clients[j] == ',')
				clk_scaling->exception_list[i].pending++;
		}

		/* for the first client */
		clk_scaling->exception_list[i].pending++;
		IPA_PM_DBG("Pending: %d\n",
			clk_scaling->exception_list[i].pending);

		/* populate the threshold */
		for (j = 0; j < params->threshold_size; j++) {
			clk_scaling->exception_list[i].threshold[j]
				= params->exceptions[i].threshold[j];
		}
	}
	IPA_PM_DBG("initialization success");

	return 0;
}
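
/*
 * Illustrative ipa_pm_init() input (hypothetical values; real callers pass
 * board-specific thresholds):
 *   threshold_size = 2, default_threshold = {600, 1200}
 *   exception_size = 1, exceptions[0].usecase = "USB,DL",
 *   exceptions[0].threshold = {1000, 2000}
 * After init, exception 0 has pending == 2 (one per comma-separated name in
 * "USB,DL"), so its thresholds replace the defaults only once both named
 * clients have registered and are simultaneously active.
 */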
int ipa_pm_destroy(void)
{
	IPA_PM_DBG("IPA PM destroy started\n");

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("Already destroyed\n");
		return -EPERM;
	}

	destroy_workqueue(ipa_pm_ctx->wq);
	kfree(ipa_pm_ctx);
	ipa_pm_ctx = NULL;

	return 0;
}
/**
 * ipa_pm_register() - register an IPA PM client with the PM
 * @params: params for a client like throughput, callback, etc.
 * @hdl: int pointer that will be used as an index to access the client
 *
 * Returns: 0 on success, negative on failure
 *
 * Side effects: *hdl is replaced with the client index or -EEXIST if
 * client is already registered
 */
int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
{
	struct ipa_pm_client *client;
	struct wakeup_source *wlock;
	int elem;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (params == NULL || hdl == NULL || params->name == NULL) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM registering client\n");

	mutex_lock(&ipa_pm_ctx->client_mutex);

	elem = find_next_open_array_element(params->name);
	*hdl = elem;
	if (elem < 0 || elem > IPA_PM_MAX_CLIENTS) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("client already registered or full array elem=%d\n",
			elem);
		return elem;
	}

	ipa_pm_ctx->clients[*hdl] = kzalloc(sizeof
		(struct ipa_pm_client), GFP_KERNEL);
	if (!ipa_pm_ctx->clients[*hdl]) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR(":kzalloc err.\n");
		return -ENOMEM;
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	client = ipa_pm_ctx->clients[*hdl];

	spin_lock_init(&client->state_lock);

	INIT_DELAYED_WORK(&client->deactivate_work,
		delayed_deferred_deactivate_work_func);

	INIT_WORK(&client->activate_work, activate_work_func);

	/* populate fields */
	strlcpy(client->name, params->name, IPA_PM_MAX_EX_CL);
	client->callback = params->callback;
	client->callback_params = params->user_data;
	client->group = params->group;
	client->hdl = *hdl;
	client->skip_clk_vote = params->skip_clk_vote;
	wlock = &client->wlock;
	wakeup_source_init(wlock, client->name);
	init_completion(&client->complete);

	/* add client to exception list */
	if (add_client_to_exception_list(*hdl)) {
		ipa_pm_deregister(*hdl);
		IPA_PM_ERR("Fail to add client to exception_list\n");
		return -EPERM;
	}

	IPA_PM_DBG("IPA PM client registered with handle %d\n", *hdl);
	return 0;
}
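
#if 0
/*
 * Illustrative usage sketch, not compiled into the driver. The client name,
 * callback behavior and pipe choice below are hypothetical; only the ipa_pm_*
 * calls and the register-params fields already used elsewhere in this file
 * are assumed to exist.
 */
static void example_pm_cb(void *user_data, enum ipa_pm_cb_event event)
{
	if (event == IPA_PM_CLIENT_ACTIVATED)
		pr_debug("example client is now activated\n");
	else if (event == IPA_PM_REQUEST_WAKEUP)
		pr_debug("PM asked the example client to wake up\n");
}

static int example_pm_usage(void)
{
	struct ipa_pm_register_params params = {
		.name = "EXAMPLE",
		.callback = example_pm_cb,
		.user_data = NULL,
		.group = IPA_PM_GROUP_DEFAULT,
		.skip_clk_vote = false,
	};
	u32 hdl;
	int ret;

	ret = ipa_pm_register(&params, &hdl);
	if (ret)
		return ret;

	/* optional: map a consumer pipe so suspend interrupts wake us */
	ipa_pm_associate_ipa_cons_to_client(hdl, IPA_CLIENT_USB_CONS);

	ret = ipa_pm_activate_sync(hdl);	/* votes for IPA clocks */
	if (ret)
		goto out;

	ipa_pm_set_throughput(hdl, 1000);	/* feeds clock scaling */

	/* ... data transfer ... */

	ipa_pm_deactivate_sync(hdl);	/* or ipa_pm_deferred_deactivate() */
out:
	ipa_pm_deregister(hdl);
	return ret;
}
#endif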
/**
 * ipa_pm_deregister() - deregister IPA client from the PM
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deregister(u32 hdl)
{
	struct ipa_pm_client *client;
	int i;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	if (ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Client is Null\n");
		return -EINVAL;
	}

	IPA_PM_DBG("IPA PM deregistering client\n");

	client = ipa_pm_ctx->clients[hdl];
	spin_lock_irqsave(&client->state_lock, flags);
	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		wait_for_completion(&client->complete);
		spin_lock_irqsave(&client->state_lock, flags);
	}

	if (IPA_PM_STATE_ACTIVE(client->state)) {
		IPA_PM_DBG("Activated clients cannot be deregistered");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EPERM;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	mutex_lock(&ipa_pm_ctx->client_mutex);

	/* nullify pointers in pipe array */
	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
		if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl])
			ipa_pm_ctx->clients_by_pipe[i] = NULL;
	}
	wakeup_source_trash(&client->wlock);
	kfree(client);
	ipa_pm_ctx->clients[hdl] = NULL;

	remove_client_from_exception_list(hdl);
	IPA_PM_DBG("IPA PM client %d deregistered\n", hdl);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return 0;
}
/**
 * ipa_pm_associate_ipa_cons_to_client() - add a mapping between an IPA pipe
 * and an IPA PM client
 * @hdl: index of the client to be mapped
 * @consumer: the pipe/consumer name to be mapped to the client
 *
 * Returns: 0 on success, negative on failure
 *
 * Side effects: multiple pipes are allowed to be mapped to a single client
 */
int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
{
	int idx;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 ||
		consumer >= IPA_CLIENT_MAX) {
		IPA_PM_ERR("invalid params\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (ipa_pm_ctx->clients[hdl] == NULL) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("Client is NULL\n");
		return -EPERM;
	}

	idx = ipa_get_ep_mapping(consumer);
	if (idx < 0) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_DBG("Pipe is not used\n");
		return 0;
	}

	IPA_PM_DBG("Mapping pipe %d to client %d\n", idx, hdl);

	if (ipa_pm_ctx->clients_by_pipe[idx] != NULL) {
		mutex_unlock(&ipa_pm_ctx->client_mutex);
		IPA_PM_ERR("Pipe is already mapped\n");
		return -EPERM;
	}
	ipa_pm_ctx->clients_by_pipe[idx] = ipa_pm_ctx->clients[hdl];
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	IPA_PM_DBG("Pipe %d is mapped to client %d\n", idx, hdl);

	return 0;
}
static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
{
	struct ipa_active_client_logging_info log_info;
	int result = 0;
	unsigned long flags;

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
		if (sync) {
			spin_unlock_irqrestore(&client->state_lock, flags);
			wait_for_completion(&client->complete);
			spin_lock_irqsave(&client->state_lock, flags);
		} else {
			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
			spin_unlock_irqrestore(&client->state_lock, flags);
			return -EINPROGRESS;
		}
	}

	switch (client->state) {
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_ACTIVATED_TIMER_SET;
	case IPA_PM_ACTIVATED:
	case IPA_PM_ACTIVATED_TIMER_SET:
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	case IPA_PM_DEACTIVATED:
		break;
	default:
		IPA_PM_ERR("Invalid State\n");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EPERM;
	}
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, client->name);
	if (!client->skip_clk_vote) {
		if (sync) {
			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
			spin_unlock_irqrestore(&client->state_lock, flags);
			IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
			spin_lock_irqsave(&client->state_lock, flags);
		} else
			result = ipa3_inc_client_enable_clks_no_block
				(&log_info);
	}

	/* we got the clocks */
	if (result == 0) {
		client->state = IPA_PM_ACTIVATED;
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_stay_awake(&client->wlock);
		spin_unlock_irqrestore(&client->state_lock, flags);
		activate_client(client->hdl);
		if (sync)
			do_clk_scaling();
		else
			queue_work(ipa_pm_ctx->wq,
				&ipa_pm_ctx->clk_scaling.work);
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
		return 0;
	}

	client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
	reinit_completion(&client->complete);
	queue_work(ipa_pm_ctx->wq, &client->activate_work);
	spin_unlock_irqrestore(&client->state_lock, flags);
	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
	return -EINPROGRESS;
}
/**
 * ipa_pm_activate(): activate ipa client to vote for clock. Can be called
 * from atomic context and returns -EINPROGRESS if it cannot be done
 * synchronously
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, -EINPROGRESS if the operation cannot be done
 * synchronously and other negatives on failure
 */
int ipa_pm_activate(u32 hdl)
{
	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], false);
}
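
/*
 * Typical asynchronous flow (illustrative): if ipa_pm_activate() returns
 * -EINPROGRESS, the clock vote is being taken on the ipa_pm workqueue; the
 * caller should hold its traffic until its registered callback is invoked
 * with IPA_PM_CLIENT_ACTIVATED, or use ipa_pm_activate_sync() below when
 * blocking is acceptable.
 */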
/**
 * ipa_pm_activate_sync(): activate ipa client to vote for clock synchronously.
 * Cannot be called from an atomic context.
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_activate_sync(u32 hdl)
{
	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], true);
}
/**
 * ipa_pm_deferred_deactivate(): schedule a timer to deactivate the client and
 * remove the clock vote. Can be called from atomic context (asynchronously)
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deferred_deactivate(u32 hdl)
{
	struct ipa_pm_client *client;
	unsigned long flags;
	unsigned long delay;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}

	client = ipa_pm_ctx->clients[hdl];
	IPA_PM_DBG_STATE(hdl, client->name, client->state);

	spin_lock_irqsave(&client->state_lock, flags);
	switch (client->state) {
	case IPA_PM_ACTIVATE_IN_PROGRESS:
		client->state = IPA_PM_DEACTIVATE_IN_PROGRESS;
	case IPA_PM_DEACTIVATED:
		IPA_PM_DBG_STATE(hdl, client->name, client->state);
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	case IPA_PM_ACTIVATED:
		delay = IPA_PM_DEFERRED_TIMEOUT;
		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
			delay *= 5;

		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
			msecs_to_jiffies(delay));
		break;
	case IPA_PM_ACTIVATED_TIMER_SET:
	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
		client->state = IPA_PM_ACTIVATED_PENDING_RESCHEDULE;
	case IPA_PM_DEACTIVATE_IN_PROGRESS:
	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
		break;
	case IPA_PM_STATE_MAX:
	default:
		IPA_PM_ERR("Bad State");
		spin_unlock_irqrestore(&client->state_lock, flags);
		return -EINVAL;
	}
	IPA_PM_DBG_STATE(hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);

	return 0;
}
/**
 * ipa_pm_deactivate_all_deferred(): Cancel the deferred deactivation timers
 * and immediately remove the IPA clock votes
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deactivate_all_deferred(void)
{
	int i;
	bool run_algorithm = false;
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];

		if (client == NULL)
			continue;

		cancel_delayed_work_sync(&client->deactivate_work);

		if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
			wait_for_completion(&client->complete);
			continue;
		}

		spin_lock_irqsave(&client->state_lock, flags);
		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);

		if (client->state == IPA_PM_ACTIVATED_TIMER_SET) {
			client->state = IPA_PM_ACTIVATED;
			IPA_PM_DBG_STATE(client->hdl, client->name,
				client->state);
			spin_unlock_irqrestore(&client->state_lock, flags);
		} else if (client->state ==
			IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
			client->state ==
			IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
			run_algorithm = true;
			client->state = IPA_PM_DEACTIVATED;
			IPA_PM_DBG_STATE(client->hdl, client->name,
				client->state);
			spin_unlock_irqrestore(&client->state_lock, flags);
			if (!client->skip_clk_vote) {
				IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
				if (client->group == IPA_PM_GROUP_APPS)
					__pm_relax(&client->wlock);
			}
			deactivate_client(client->hdl);
		} else /* if activated or deactivated, we do nothing */
			spin_unlock_irqrestore(&client->state_lock, flags);
	}

	if (run_algorithm)
		do_clk_scaling();

	return 0;
}
/**
 * ipa_pm_deactivate_sync(): deactivate ipa client and remove the clock vote.
 * Cannot be called from atomic context.
 * @hdl: index of the client in the array
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_deactivate_sync(u32 hdl)
{
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
		IPA_PM_ERR("Invalid Param\n");
		return -EINVAL;
	}
	client = ipa_pm_ctx->clients[hdl];

	cancel_delayed_work_sync(&client->deactivate_work);

	if (IPA_PM_STATE_IN_PROGRESS(client->state))
		wait_for_completion(&client->complete);

	spin_lock_irqsave(&client->state_lock, flags);
	IPA_PM_DBG_STATE(hdl, client->name, client->state);

	if (client->state == IPA_PM_DEACTIVATED) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	/* else case (Deactivates all Activated cases) */
	if (!client->skip_clk_vote) {
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
		if (client->group == IPA_PM_GROUP_APPS)
			__pm_relax(&client->wlock);
	}

	spin_lock_irqsave(&client->state_lock, flags);
	client->state = IPA_PM_DEACTIVATED;
	IPA_PM_DBG_STATE(hdl, client->name, client->state);
	spin_unlock_irqrestore(&client->state_lock, flags);
	deactivate_client(hdl);
	do_clk_scaling();

	return 0;
}
/**
 * ipa_pm_handle_suspend(): calls the callbacks of suspended clients to wake up
 * @pipe_bitmask: the bits represent the suspended pipes whose mapped clients
 * should be woken up
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_handle_suspend(u32 pipe_bitmask)
{
	int i;
	struct ipa_pm_client *client;
	bool client_notified[IPA_PM_MAX_CLIENTS] = { false };

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	IPA_PM_DBG_LOW("bitmask: %d", pipe_bitmask);

	if (pipe_bitmask == 0)
		return 0;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
		if (pipe_bitmask & (1 << i)) {
			client = ipa_pm_ctx->clients_by_pipe[i];
			if (client && !client_notified[client->hdl]) {
				if (client->callback) {
					client->callback(client->callback_params
						, IPA_PM_REQUEST_WAKEUP);
					client_notified[client->hdl] = true;
				} else {
					IPA_PM_ERR("client has no callback");
					WARN_ON(1);
				}
			}
		}
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);
	return 0;
}
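
/*
 * Example (hypothetical mapping): if pipes 5 and 7 are both associated with
 * the same client, a pipe_bitmask of (1 << 5) | (1 << 7) == 0xa0 results in a
 * single IPA_PM_REQUEST_WAKEUP callback, since client_notified[] de-duplicates
 * by client handle.
 */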
/**
 * ipa_pm_set_throughput(): Adds/changes the throughput requirement to IPA PM
 * to be used for clock scaling
 * @hdl: index of the client in the array
 * @throughput: the new throughput value to be set for that client
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_pm_set_throughput(u32 hdl, int throughput)
{
	struct ipa_pm_client *client;
	unsigned long flags;

	if (ipa_pm_ctx == NULL) {
		IPA_PM_ERR("PM_ctx is null\n");
		return -EINVAL;
	}

	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL
		|| throughput < 0) {
		IPA_PM_ERR("Invalid Params\n");
		return -EINVAL;
	}
	client = ipa_pm_ctx->clients[hdl];

	mutex_lock(&ipa_pm_ctx->client_mutex);
	if (client->group == IPA_PM_GROUP_DEFAULT)
		IPA_PM_DBG_LOW("Old throughput: %d\n", client->throughput);
	else
		IPA_PM_DBG_LOW("old Group %d throughput: %d\n",
			client->group, ipa_pm_ctx->group_tput[client->group]);

	if (client->group == IPA_PM_GROUP_DEFAULT)
		client->throughput = throughput;
	else
		ipa_pm_ctx->group_tput[client->group] = throughput;

	if (client->group == IPA_PM_GROUP_DEFAULT)
		IPA_PM_DBG_LOW("New throughput: %d\n", client->throughput);
	else
		IPA_PM_DBG_LOW("New Group %d throughput: %d\n",
			client->group, ipa_pm_ctx->group_tput[client->group]);
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	spin_lock_irqsave(&client->state_lock, flags);
	if (IPA_PM_STATE_ACTIVE(client->state) || (client->group !=
		IPA_PM_GROUP_DEFAULT)) {
		spin_unlock_irqrestore(&client->state_lock, flags);
		do_clk_scaling();
		return 0;
	}
	spin_unlock_irqrestore(&client->state_lock, flags);

	return 0;
}

void ipa_pm_set_clock_index(int index)
{
	if (ipa_pm_ctx && index >= 0)
		ipa_pm_ctx->clk_scaling.cur_vote = index;

	IPA_PM_DBG("Setting pm clock vote to %d\n", index);
}
/**
 * ipa_pm_stat() - print PM stat
 * @buf: [in] The user buff used to print
 * @size: [in] The size of buf
 * Returns: number of bytes used on success, negative on failure
 *
 * This function is called by ipa_debugfs in order to receive
 * a picture of the clients in the PM and the throughput, threshold and cur vote
 */
int ipa_pm_stat(char *buf, int size)
{
	struct ipa_pm_client *client;
	struct clk_scaling_db *clk = &ipa_pm_ctx->clk_scaling;
	int i, j, tput, cnt = 0, result = 0;
	unsigned long flags;

	if (!buf || size < 0)
		return -EINVAL;

	mutex_lock(&ipa_pm_ctx->client_mutex);

	result = scnprintf(buf + cnt, size - cnt, "\n\nCurrent threshold: [");
	cnt += result;

	for (i = 0; i < clk->threshold_size; i++) {
		result = scnprintf(buf + cnt, size - cnt,
			"%d, ", clk->current_threshold[i]);
		cnt += result;
	}

	result = scnprintf(buf + cnt, size - cnt, "\b\b]\n");
	cnt += result;

	result = scnprintf(buf + cnt, size - cnt,
		"Aggregated tput: %d, Cur vote: %d",
		ipa_pm_ctx->aggregated_tput, clk->cur_vote);
	cnt += result;

	result = scnprintf(buf + cnt, size - cnt, "\n\nRegistered Clients:\n");
	cnt += result;

	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
		client = ipa_pm_ctx->clients[i];

		if (client == NULL)
			continue;

		spin_lock_irqsave(&client->state_lock, flags);
		if (client->group == IPA_PM_GROUP_DEFAULT)
			tput = client->throughput;
		else
			tput = ipa_pm_ctx->group_tput[client->group];

		result = scnprintf(buf + cnt, size - cnt,
			"Client[%d]: %s State:%s\nGroup: %s Throughput: %d Pipes: ",
			i, client->name, client_state_to_str[client->state],
			ipa_pm_group_to_str[client->group], tput);
		cnt += result;

		for (j = 0; j < IPA3_MAX_NUM_PIPES; j++) {
			if (ipa_pm_ctx->clients_by_pipe[j] == client) {
				result = scnprintf(buf + cnt, size - cnt,
					"%d, ", j);
				cnt += result;
			}
		}

		result = scnprintf(buf + cnt, size - cnt, "\b\b\n\n");
		cnt += result;
		spin_unlock_irqrestore(&client->state_lock, flags);
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return cnt;
}
/**
 * ipa_pm_exceptions_stat() - print PM exceptions stat
 * @buf: [in] The user buff used to print
 * @size: [in] The size of buf
 * Returns: number of bytes used on success, negative on failure
 *
 * This function is called by ipa_debugfs in order to receive
 * a full picture of the exceptions in the PM
 */
int ipa_pm_exceptions_stat(char *buf, int size)
{
	int i, j, cnt = 0, result = 0;
	struct ipa_pm_exception_list *exception;

	if (!buf || size < 0)
		return -EINVAL;

	result = scnprintf(buf + cnt, size - cnt, "\n");
	cnt += result;

	mutex_lock(&ipa_pm_ctx->client_mutex);
	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
		if (exception == NULL) {
			result = scnprintf(buf + cnt, size - cnt,
				"Exception %d is NULL\n\n", i);
			cnt += result;
			continue;
		}

		result = scnprintf(buf + cnt, size - cnt,
			"Exception %d: %s\nPending: %d Bitmask: %d Threshold: ["
			, i, exception->clients, exception->pending,
			exception->bitmask);
		cnt += result;
		for (j = 0; j < ipa_pm_ctx->clk_scaling.threshold_size; j++) {
			result = scnprintf(buf + cnt, size - cnt,
				"%d, ", exception->threshold[j]);
			cnt += result;
		}
		result = scnprintf(buf + cnt, size - cnt, "\b\b]\n\n");
		cnt += result;
	}
	mutex_unlock(&ipa_pm_ctx->client_mutex);

	return cnt;
}