tsnep_tc.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (C) 2021 Gerhard Engleder <[email protected]> */
  3. #include "tsnep.h"
  4. #include <net/pkt_sched.h>
  5. /* save one operation at the end for additional operation at list change */
  6. #define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
  7. static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
  8. {
  9. int i;
  10. u64 cycle_time;
  11. if (!qopt->cycle_time)
  12. return -ERANGE;
  13. if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
  14. return -EINVAL;
  15. cycle_time = 0;
  16. for (i = 0; i < qopt->num_entries; i++) {
  17. if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
  18. return -EINVAL;
  19. if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
  20. return -EINVAL;
  21. if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
  22. return -EINVAL;
  23. cycle_time += qopt->entries[i].interval;
  24. }
  25. if (qopt->cycle_time != cycle_time)
  26. return -EINVAL;
  27. if (qopt->cycle_time_extension >= qopt->cycle_time)
  28. return -EINVAL;
  29. return 0;
  30. }
/* Write one gate control list operation to both the driver's shadow copy and
 * the hardware list memory at gcl->addr.
 *
 * @gcl: gate control list (shadow copy plus hardware base address)
 * @index: operation slot to write
 * @properties: gate mask plus control flags (e.g. TSNEP_GCL_LAST)
 * @interval: duration of the operation (presumably nanoseconds, per taprio
 *            convention — confirm against hardware spec)
 * @flush: if true, read back to flush the posted writes to the device
 */
static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
				      u32 properties, u32 interval, bool flush)
{
	void __iomem *addr = gcl->addr +
			     sizeof(struct tsnep_gcl_operation) * index;

	/* keep the shadow copy in sync with hardware list memory */
	gcl->operation[index].properties = properties;
	gcl->operation[index].interval = interval;

	iowrite32(properties, addr);
	iowrite32(interval, addr + sizeof(u32));

	if (flush) {
		/* flush write with read access */
		ioread32(addr);
	}
}
  45. static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
  46. {
  47. u64 duration;
  48. int count;
  49. /* change needs to be triggered one or two operations before start of
  50. * new gate control list
  51. * - change is triggered at start of operation (minimum one operation)
  52. * - operation with adjusted interval is inserted on demand to exactly
  53. * meet the start of the new gate control list (optional)
  54. *
  55. * additionally properties are read directly after start of previous
  56. * operation
  57. *
  58. * therefore, three operations needs to be considered for the limit
  59. */
  60. duration = 0;
  61. count = 3;
  62. while (count) {
  63. duration += gcl->operation[index].interval;
  64. index--;
  65. if (index < 0)
  66. index = gcl->count - 1;
  67. count--;
  68. }
  69. return duration;
  70. }
  71. static void tsnep_write_gcl(struct tsnep_gcl *gcl,
  72. struct tc_taprio_qopt_offload *qopt)
  73. {
  74. int i;
  75. u32 properties;
  76. u64 extend;
  77. u64 cut;
  78. gcl->base_time = ktime_to_ns(qopt->base_time);
  79. gcl->cycle_time = qopt->cycle_time;
  80. gcl->cycle_time_extension = qopt->cycle_time_extension;
  81. for (i = 0; i < qopt->num_entries; i++) {
  82. properties = qopt->entries[i].gate_mask;
  83. if (i == (qopt->num_entries - 1))
  84. properties |= TSNEP_GCL_LAST;
  85. tsnep_write_gcl_operation(gcl, i, properties,
  86. qopt->entries[i].interval, true);
  87. }
  88. gcl->count = qopt->num_entries;
  89. /* calculate change limit; i.e., the time needed between enable and
  90. * start of new gate control list
  91. */
  92. /* case 1: extend cycle time for change
  93. * - change duration of last operation
  94. * - cycle time extension
  95. */
  96. extend = tsnep_change_duration(gcl, gcl->count - 1);
  97. extend += gcl->cycle_time_extension;
  98. /* case 2: cut cycle time for change
  99. * - maximum change duration
  100. */
  101. cut = 0;
  102. for (i = 0; i < gcl->count; i++)
  103. cut = max(cut, tsnep_change_duration(gcl, i));
  104. /* use maximum, because the actual case (extend or cut) can be
  105. * determined only after limit is known (chicken-and-egg problem)
  106. */
  107. gcl->change_limit = max(extend, cut);
  108. }
  109. static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
  110. {
  111. u64 start = gcl->base_time;
  112. u64 n;
  113. if (start <= limit) {
  114. n = div64_u64(limit - start, gcl->cycle_time);
  115. start += (n + 1) * gcl->cycle_time;
  116. }
  117. return start;
  118. }
  119. static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
  120. {
  121. u64 start = gcl->base_time;
  122. u64 n;
  123. n = div64_u64(limit - start, gcl->cycle_time);
  124. start += n * gcl->cycle_time;
  125. if (start == limit)
  126. start -= gcl->cycle_time;
  127. return start;
  128. }
  129. static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
  130. bool insert)
  131. {
  132. /* previous operation triggers change and properties are evaluated at
  133. * start of operation
  134. */
  135. if (index == 0)
  136. index = gcl->count - 1;
  137. else
  138. index = index - 1;
  139. change -= gcl->operation[index].interval;
  140. /* optionally change to new list with additional operation in between */
  141. if (insert) {
  142. void __iomem *addr = gcl->addr +
  143. sizeof(struct tsnep_gcl_operation) * index;
  144. gcl->operation[index].properties |= TSNEP_GCL_INSERT;
  145. iowrite32(gcl->operation[index].properties, addr);
  146. }
  147. return change;
  148. }
  149. static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
  150. {
  151. int i;
  152. u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
  153. void __iomem *addr;
  154. /* search for insert operation and reset properties */
  155. for (i = 0; i < gcl->count; i++) {
  156. if (gcl->operation[i].properties & ~mask) {
  157. addr = gcl->addr +
  158. sizeof(struct tsnep_gcl_operation) * i;
  159. gcl->operation[i].properties &= mask;
  160. iowrite32(gcl->operation[i].properties, addr);
  161. break;
  162. }
  163. }
  164. }
  165. static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
  166. u64 change, u32 interval)
  167. {
  168. u32 properties;
  169. properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
  170. /* change to new list directly after inserted operation */
  171. properties |= TSNEP_GCL_CHANGE;
  172. /* last operation of list is reserved to insert operation */
  173. tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
  174. interval, false);
  175. return tsnep_set_gcl_change(gcl, ref, change, true);
  176. }
  177. static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
  178. {
  179. int ref = gcl->count - 1;
  180. u32 interval = gcl->operation[ref].interval + extension;
  181. start -= gcl->operation[ref].interval;
  182. return tsnep_insert_gcl_operation(gcl, ref, start, interval);
  183. }
/* Cut the current cycle short so the new list can start at the requested
 * time.
 *
 * @gcl: currently active gate control list
 * @start: start time of the cycle being cut
 * @cycle_time: desired (shortened) length of this final cycle
 *
 * Walks the list summing intervals until the shortened cycle time would be
 * exceeded, or until cutting the next operation would leave a remainder too
 * small for hardware. If the cut lands exactly on an operation boundary the
 * change is triggered there; otherwise a bridging operation is inserted.
 *
 * Returns the change time to program into hardware.
 */
static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
{
	u64 sum = 0;
	int i;

	/* find operation which shall be cutted */
	for (i = 0; i < gcl->count; i++) {
		u64 sum_tmp = sum + gcl->operation[i].interval;
		u64 interval;

		/* sum up operations as long as cycle time is not exceeded */
		if (sum_tmp > cycle_time)
			break;

		/* remaining interval must be big enough for hardware */
		interval = cycle_time - sum_tmp;
		if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
			break;

		sum = sum_tmp;
	}
	if (sum == cycle_time) {
		/* no need to cut operation itself or whole cycle
		 * => change exactly at operation
		 */
		return tsnep_set_gcl_change(gcl, i, start + sum, false);
	}
	return tsnep_insert_gcl_operation(gcl, i, start + sum,
					  cycle_time - sum);
}
/* Program the start time of the new gate control list @gcl and, if a list
 * @curr is currently active, schedule the transition from it (by cutting or
 * extending its last cycle).
 *
 * @curr may be NULL when no list is active; then @gcl is simply started.
 *
 * Returns 0 on success, -EAGAIN if the start time is too far in the future
 * for the 32-bit gate control time register.
 */
static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
			    struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
{
	u64 system_time;
	u64 timeout;
	u64 limit;

	/* estimate timeout limit after timeout enable, actually timeout limit
	 * in hardware will be earlier than estimate so we are on the safe side
	 */
	tsnep_get_system_time(adapter, &system_time);
	timeout = system_time + TSNEP_GC_TIMEOUT;

	/* the new list must not start before the active list can complete its
	 * transition (change_limit)
	 */
	if (curr)
		limit = timeout + curr->change_limit;
	else
		limit = timeout;

	gcl->start_time = tsnep_gcl_start_after(gcl, limit);

	/* gate control time register is only 32bit => time shall be in the near
	 * future (no driver support for far future implemented)
	 */
	if ((gcl->start_time - system_time) >= U32_MAX)
		return -EAGAIN;

	if (curr) {
		/* change gate control list */
		u64 last;
		u64 change;

		/* last cycle start of the active list before the new start */
		last = tsnep_gcl_start_before(curr, gcl->start_time);
		if ((last + curr->cycle_time) == gcl->start_time)
			/* new list starts exactly at the next cycle boundary */
			change = tsnep_cut_gcl(curr, last,
					       gcl->start_time - last);
		else if (((gcl->start_time - last) <=
			  curr->cycle_time_extension) ||
			 ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
			/* gap small enough to bridge by extending the cycle */
			change = tsnep_extend_gcl(curr, last,
						  gcl->start_time - last);
		else
			/* otherwise shorten the last cycle of the active list */
			change = tsnep_cut_gcl(curr, last,
					       gcl->start_time - last);
		WARN_ON(change <= timeout);
		gcl->change = true;
		iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
	} else {
		/* start gate control list */
		WARN_ON(gcl->start_time <= timeout);
		gcl->change = false;
		iowrite32(gcl->start_time & 0xFFFFFFFF,
			  adapter->addr + TSNEP_GC_TIME);
	}

	return 0;
}
/* Apply a taprio offload request: disable gate control, or validate and write
 * the new gate control list and switch to it.
 *
 * Two hardware lists (A/B) are used in a double-buffering scheme; the
 * inactive one (adapter->next_gcl) is written and then enabled. A hardware
 * timeout discards a late enable; in that case the enable is retried with a
 * newly computed start time.
 *
 * Returns 0 on success or a negative error code.
 */
static int tsnep_taprio(struct tsnep_adapter *adapter,
			struct tc_taprio_qopt_offload *qopt)
{
	struct tsnep_gcl *gcl;
	struct tsnep_gcl *curr;
	int retval;

	if (!adapter->gate_control)
		return -EOPNOTSUPP;

	if (!qopt->enable) {
		/* disable gate control if active */
		mutex_lock(&adapter->gate_control_lock);

		if (adapter->gate_control_active) {
			iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
			adapter->gate_control_active = false;
		}

		mutex_unlock(&adapter->gate_control_lock);

		return 0;
	}

	retval = tsnep_validate_gcl(qopt);
	if (retval)
		return retval;

	mutex_lock(&adapter->gate_control_lock);

	/* write the new list into the currently unused hardware slot */
	gcl = &adapter->gcl[adapter->next_gcl];
	tsnep_write_gcl(gcl, qopt);

	/* select current gate control list if active */
	if (adapter->gate_control_active) {
		if (adapter->next_gcl == 0)
			curr = &adapter->gcl[1];
		else
			curr = &adapter->gcl[0];
	} else {
		curr = NULL;
	}

	for (;;) {
		/* start timeout which discards late enable, this helps ensuring
		 * that start/change time are in the future at enable
		 */
		iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);

		retval = tsnep_enable_gcl(adapter, gcl, curr);
		if (retval) {
			mutex_unlock(&adapter->gate_control_lock);

			return retval;
		}

		/* enable gate control list */
		if (adapter->next_gcl == 0)
			iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
		else
			iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);

		/* done if timeout did not happen */
		if (!(ioread32(adapter->addr + TSNEP_GC) &
		      TSNEP_GC_TIMEOUT_SIGNAL))
			break;

		/* timeout is acknowledged with any enable */
		iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);

		/* undo any insert flag set during the failed attempt */
		if (curr)
			tsnep_clean_gcl(curr);

		/* retry because of timeout */
	}

	adapter->gate_control_active = true;

	/* flip the double buffer for the next list change */
	if (adapter->next_gcl == 0)
		adapter->next_gcl = 1;
	else
		adapter->next_gcl = 0;

	mutex_unlock(&adapter->gate_control_lock);

	return 0;
}
  325. int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
  326. void *type_data)
  327. {
  328. struct tsnep_adapter *adapter = netdev_priv(netdev);
  329. switch (type) {
  330. case TC_SETUP_QDISC_TAPRIO:
  331. return tsnep_taprio(adapter, type_data);
  332. default:
  333. return -EOPNOTSUPP;
  334. }
  335. }
/* Initialize traffic-class support: open all gates and record the hardware
 * addresses of the two gate control lists (A/B double buffer).
 *
 * Returns 0 (also when the hardware has no gate control support).
 */
int tsnep_tc_init(struct tsnep_adapter *adapter)
{
	if (!adapter->gate_control)
		return 0;

	/* open all gates */
	iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
	iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);

	adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
	adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;

	return 0;
}
  347. void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
  348. {
  349. if (!adapter->gate_control)
  350. return;
  351. if (adapter->gate_control_active) {
  352. iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
  353. adapter->gate_control_active = false;
  354. }
  355. }