  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Tegra host1x Syncpoints
  4. *
  5. * Copyright (c) 2010-2015, NVIDIA Corporation.
  6. */
  7. #include <linux/module.h>
  8. #include <linux/device.h>
  9. #include <linux/slab.h>
  10. #include <trace/events/host1x.h>
  11. #include "syncpt.h"
  12. #include "dev.h"
  13. #include "intr.h"
  14. #include "debug.h"
  15. #define SYNCPT_CHECK_PERIOD (2 * HZ)
  16. #define MAX_STUCK_CHECK_COUNT 15
  17. static struct host1x_syncpt_base *
  18. host1x_syncpt_base_request(struct host1x *host)
  19. {
  20. struct host1x_syncpt_base *bases = host->bases;
  21. unsigned int i;
  22. for (i = 0; i < host->info->nb_bases; i++)
  23. if (!bases[i].requested)
  24. break;
  25. if (i >= host->info->nb_bases)
  26. return NULL;
  27. bases[i].requested = true;
  28. return &bases[i];
  29. }
  30. static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
  31. {
  32. if (base)
  33. base->requested = false;
  34. }
/**
 * host1x_syncpt_alloc() - allocate a syncpoint
 * @host: host1x device data
 * @flags: bitfield of HOST1X_SYNCPT_* flags
 * @name: name for the syncpoint for use in debug prints
 *
 * Allocates a hardware syncpoint for the caller's use. The caller then has
 * the sole authority to mutate the syncpoint's value until it is freed again.
 *
 * If no free syncpoints are available, or a NULL name was specified, returns
 * NULL.
 */
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name)
{
	struct host1x_syncpt *sp = host->syncpt;
	char *full_name;
	unsigned int i;

	if (!name)
		return NULL;

	mutex_lock(&host->syncpt_mutex);

	/* A refcount of zero marks a free slot; scan for the first one. */
	for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	/* Prefix the caller's name with the syncpoint ID for debug output. */
	full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
	if (!full_name)
		goto free_base;

	sp->name = full_name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	/* Taking the first reference is what marks this slot as in use. */
	kref_init(&sp->ref);

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	/* Unwind the base reservation made above (no-op if sp->base is NULL). */
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_alloc);
  85. /**
  86. * host1x_syncpt_id() - retrieve syncpoint ID
  87. * @sp: host1x syncpoint
  88. *
  89. * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
  90. * often used as a value to program into registers that control how hardware
  91. * blocks interact with syncpoints.
  92. */
  93. u32 host1x_syncpt_id(struct host1x_syncpt *sp)
  94. {
  95. return sp->id;
  96. }
  97. EXPORT_SYMBOL(host1x_syncpt_id);
  98. /**
  99. * host1x_syncpt_incr_max() - update the value sent to hardware
  100. * @sp: host1x syncpoint
  101. * @incrs: number of increments
  102. */
  103. u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
  104. {
  105. return (u32)atomic_add_return(incrs, &sp->max_val);
  106. }
  107. EXPORT_SYMBOL(host1x_syncpt_incr_max);
  108. /*
  109. * Write cached syncpoint and waitbase values to hardware.
  110. */
  111. void host1x_syncpt_restore(struct host1x *host)
  112. {
  113. struct host1x_syncpt *sp_base = host->syncpt;
  114. unsigned int i;
  115. for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
  116. /*
  117. * Unassign syncpt from channels for purposes of Tegra186
  118. * syncpoint protection. This prevents any channel from
  119. * accessing it until it is reassigned.
  120. */
  121. host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
  122. host1x_hw_syncpt_restore(host, sp_base + i);
  123. }
  124. for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
  125. host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
  126. host1x_hw_syncpt_enable_protection(host);
  127. wmb();
  128. }
  129. /*
  130. * Update the cached syncpoint and waitbase values by reading them
  131. * from the registers.
  132. */
  133. void host1x_syncpt_save(struct host1x *host)
  134. {
  135. struct host1x_syncpt *sp_base = host->syncpt;
  136. unsigned int i;
  137. for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
  138. if (host1x_syncpt_client_managed(sp_base + i))
  139. host1x_hw_syncpt_load(host, sp_base + i);
  140. else
  141. WARN_ON(!host1x_syncpt_idle(sp_base + i));
  142. }
  143. for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
  144. host1x_hw_syncpt_load_wait_base(host, sp_base + i);
  145. }
  146. /*
  147. * Updates the cached syncpoint value by reading a new value from the hardware
  148. * register
  149. */
  150. u32 host1x_syncpt_load(struct host1x_syncpt *sp)
  151. {
  152. u32 val;
  153. val = host1x_hw_syncpt_load(sp->host, sp);
  154. trace_host1x_syncpt_load_min(sp->id, val);
  155. return val;
  156. }
  157. /*
  158. * Get the current syncpoint base
  159. */
  160. u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
  161. {
  162. host1x_hw_syncpt_load_wait_base(sp->host, sp);
  163. return sp->base_val;
  164. }
  165. /**
  166. * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
  167. * @sp: host1x syncpoint
  168. */
  169. int host1x_syncpt_incr(struct host1x_syncpt *sp)
  170. {
  171. return host1x_hw_syncpt_cpu_incr(sp->host, sp);
  172. }
  173. EXPORT_SYMBOL(host1x_syncpt_incr);
  174. /*
  175. * Updated sync point form hardware, and returns true if syncpoint is expired,
  176. * false if we may need to wait
  177. */
  178. static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
  179. {
  180. host1x_hw_syncpt_load(sp->host, sp);
  181. return host1x_syncpt_is_expired(sp, thresh);
  182. }
/**
 * host1x_syncpt_wait() - wait for a syncpoint to reach a given value
 * @sp: host1x syncpoint
 * @thresh: threshold
 * @timeout: maximum time to wait for the syncpoint to reach the given value
 * @value: return location for the syncpoint value
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;

	/* Report the current value up front, even on the fast paths below. */
	if (value)
		*value = host1x_syncpt_load(sp);

	/* Fast path: the threshold has already been reached. */
	if (host1x_syncpt_is_expired(sp, thresh))
		return 0;

	/* A zero timeout means poll only; we already know it isn't expired. */
	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < 0)
		timeout = LONG_MAX;

	/*
	 * Wait for the syncpoint, timeout, or a signal. The wait is broken
	 * into SYNCPT_CHECK_PERIOD slices so stuck waits can be reported.
	 */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain;

		remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			/* Condition met; report the final value if asked. */
			if (value)
				*value = host1x_syncpt_load(sp);

			err = 0;

			break;
		}

		if (remain < 0) {
			/* Interrupted by a signal; propagate the error. */
			err = remain;
			break;
		}

		timeout -= check;

		/*
		 * Still waiting: warn periodically and dump syncpoint state.
		 * The full debug dump is emitted exactly once, on the last
		 * permitted check.
		 */
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				 "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);

			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);

			check_count++;
		}
	}

	/* Always release the interrupt action registered above. */
	host1x_intr_put_ref(sp->host, sp->id, ref, true);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
  255. /*
  256. * Returns true if syncpoint is expired, false if we may need to wait
  257. */
  258. bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
  259. {
  260. u32 current_val;
  261. smp_rmb();
  262. current_val = (u32)atomic_read(&sp->min_val);
  263. return ((current_val - thresh) & 0x80000000U) == 0U;
  264. }
/*
 * host1x_syncpt_init() - set up syncpoint and wait-base bookkeeping
 *
 * Allocates the per-syncpoint and per-base state arrays, reserves a "nop"
 * syncpoint for internal use, and optionally pre-reserves the VBLANK
 * syncpoints. Returns 0 on success, -ENOMEM on allocation failure.
 */
int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
	if (!host->nop_sp)
		return -ENOMEM;

	if (host->info->reserve_vblank_syncpts) {
		/*
		 * Mark syncpoints 26 and 27 as in use by seeding their
		 * refcounts; presumably these IDs are the hardware VBLANK
		 * syncpoints (see
		 * host1x_syncpt_release_vblank_reservation()) — confirm
		 * against the SoC documentation.
		 */
		kref_init(&host->syncpt[26].ref);
		kref_init(&host->syncpt[27].ref);
	}

	return 0;
}
  297. /**
  298. * host1x_syncpt_request() - request a syncpoint
  299. * @client: client requesting the syncpoint
  300. * @flags: flags
  301. *
  302. * host1x client drivers can use this function to allocate a syncpoint for
  303. * subsequent use. A syncpoint returned by this function will be reserved for
  304. * use by the client exclusively. When no longer using a syncpoint, a host1x
  305. * client driver needs to release it using host1x_syncpt_put().
  306. */
  307. struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
  308. unsigned long flags)
  309. {
  310. struct host1x *host = dev_get_drvdata(client->host->parent);
  311. return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
  312. }
  313. EXPORT_SYMBOL(host1x_syncpt_request);
/*
 * kref release callback: returns the syncpoint slot to the free pool.
 * A zeroed refcount is what host1x_syncpt_alloc() treats as "free".
 */
static void syncpt_release(struct kref *ref)
{
	struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);

	/* Re-sync the cached max with hardware so the slot starts clean. */
	atomic_set(&sp->max_val, host1x_syncpt_read(sp));

	sp->locked = false;

	mutex_lock(&sp->host->syncpt_mutex);

	/* Release the optional wait base and the kasprintf()'d name. */
	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}
  327. /**
  328. * host1x_syncpt_put() - free a requested syncpoint
  329. * @sp: host1x syncpoint
  330. *
  331. * Release a syncpoint previously allocated using host1x_syncpt_request(). A
  332. * host1x client driver should call this when the syncpoint is no longer in
  333. * use.
  334. */
  335. void host1x_syncpt_put(struct host1x_syncpt *sp)
  336. {
  337. if (!sp)
  338. return;
  339. kref_put(&sp->ref, syncpt_release);
  340. }
  341. EXPORT_SYMBOL(host1x_syncpt_put);
  342. void host1x_syncpt_deinit(struct host1x *host)
  343. {
  344. struct host1x_syncpt *sp = host->syncpt;
  345. unsigned int i;
  346. for (i = 0; i < host->info->nb_pts; i++, sp++)
  347. kfree(sp->name);
  348. }
  349. /**
  350. * host1x_syncpt_read_max() - read maximum syncpoint value
  351. * @sp: host1x syncpoint
  352. *
  353. * The maximum syncpoint value indicates how many operations there are in
  354. * queue, either in channel or in a software thread.
  355. */
  356. u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
  357. {
  358. smp_rmb();
  359. return (u32)atomic_read(&sp->max_val);
  360. }
  361. EXPORT_SYMBOL(host1x_syncpt_read_max);
  362. /**
  363. * host1x_syncpt_read_min() - read minimum syncpoint value
  364. * @sp: host1x syncpoint
  365. *
  366. * The minimum syncpoint value is a shadow of the current sync point value in
  367. * hardware.
  368. */
  369. u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
  370. {
  371. smp_rmb();
  372. return (u32)atomic_read(&sp->min_val);
  373. }
  374. EXPORT_SYMBOL(host1x_syncpt_read_min);
  375. /**
  376. * host1x_syncpt_read() - read the current syncpoint value
  377. * @sp: host1x syncpoint
  378. */
  379. u32 host1x_syncpt_read(struct host1x_syncpt *sp)
  380. {
  381. return host1x_syncpt_load(sp);
  382. }
  383. EXPORT_SYMBOL(host1x_syncpt_read);
/* Number of hardware syncpoints supported by this host1x instance. */
unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}
/* Number of wait bases supported by this host1x instance. */
unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}
/* Number of module locks (mlocks) supported by this host1x instance. */
unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}
  396. /**
  397. * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
  398. * @host: host1x controller
  399. * @id: syncpoint ID
  400. */
  401. struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
  402. unsigned int id)
  403. {
  404. if (id >= host->info->nb_pts)
  405. return NULL;
  406. if (kref_get_unless_zero(&host->syncpt[id].ref))
  407. return &host->syncpt[id];
  408. else
  409. return NULL;
  410. }
  411. EXPORT_SYMBOL(host1x_syncpt_get_by_id);
  412. /**
  413. * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
  414. * increase the refcount.
  415. * @host: host1x controller
  416. * @id: syncpoint ID
  417. */
  418. struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
  419. unsigned int id)
  420. {
  421. if (id >= host->info->nb_pts)
  422. return NULL;
  423. return &host->syncpt[id];
  424. }
  425. EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);
  426. /**
  427. * host1x_syncpt_get() - increment syncpoint refcount
  428. * @sp: syncpoint
  429. */
  430. struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
  431. {
  432. kref_get(&sp->ref);
  433. return sp;
  434. }
  435. EXPORT_SYMBOL(host1x_syncpt_get);
  436. /**
  437. * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
  438. * @sp: host1x syncpoint
  439. */
  440. struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
  441. {
  442. return sp ? sp->base : NULL;
  443. }
  444. EXPORT_SYMBOL(host1x_syncpt_get_base);
  445. /**
  446. * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
  447. * @base: host1x syncpoint wait base
  448. */
  449. u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
  450. {
  451. return base->id;
  452. }
  453. EXPORT_SYMBOL(host1x_syncpt_base_id);
/*
 * No-op kref release callback: lets a reference be dropped via kref_put()
 * without triggering the normal syncpt_release() cleanup.
 */
static void do_nothing(struct kref *ref)
{
}
/**
 * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
 * available for allocation
 *
 * @client: host1x bus client
 * @syncpt_id: syncpoint ID to make available
 *
 * Makes VBLANK<i> syncpoint available for allocation if it was
 * reserved at initialization time. This should be called by the display
 * driver after it has ensured that any VBLANK increment programming configured
 * by the boot chain has been disabled.
 */
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	/* Nothing to release if no reservation was made at init time. */
	if (!host->info->reserve_vblank_syncpts)
		return;

	/* Drop the init-time reference without running syncpt_release(). */
	kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
}
EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);