sja1105_tas.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2019, Vladimir Oltean <[email protected]>
  3. */
  4. #include "sja1105.h"
  5. #define SJA1105_TAS_CLKSRC_DISABLED 0
  6. #define SJA1105_TAS_CLKSRC_STANDALONE 1
  7. #define SJA1105_TAS_CLKSRC_AS6802 2
  8. #define SJA1105_TAS_CLKSRC_PTP 3
  9. #define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
  10. #define work_to_sja1105_tas(d) \
  11. container_of((d), struct sja1105_tas_data, tas_work)
  12. #define tas_to_sja1105(d) \
  13. container_of((d), struct sja1105_private, tas_data)
  14. static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
  15. {
  16. struct sja1105_tas_data *tas_data = &priv->tas_data;
  17. struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
  18. struct dsa_switch *ds = priv->ds;
  19. s64 earliest_base_time = S64_MAX;
  20. s64 latest_base_time = 0;
  21. s64 its_cycle_time = 0;
  22. s64 max_cycle_time = 0;
  23. int port;
  24. tas_data->enabled = false;
  25. for (port = 0; port < ds->num_ports; port++) {
  26. const struct tc_taprio_qopt_offload *offload;
  27. offload = tas_data->offload[port];
  28. if (!offload)
  29. continue;
  30. tas_data->enabled = true;
  31. if (max_cycle_time < offload->cycle_time)
  32. max_cycle_time = offload->cycle_time;
  33. if (latest_base_time < offload->base_time)
  34. latest_base_time = offload->base_time;
  35. if (earliest_base_time > offload->base_time) {
  36. earliest_base_time = offload->base_time;
  37. its_cycle_time = offload->cycle_time;
  38. }
  39. }
  40. if (!list_empty(&gating_cfg->entries)) {
  41. tas_data->enabled = true;
  42. if (max_cycle_time < gating_cfg->cycle_time)
  43. max_cycle_time = gating_cfg->cycle_time;
  44. if (latest_base_time < gating_cfg->base_time)
  45. latest_base_time = gating_cfg->base_time;
  46. if (earliest_base_time > gating_cfg->base_time) {
  47. earliest_base_time = gating_cfg->base_time;
  48. its_cycle_time = gating_cfg->cycle_time;
  49. }
  50. }
  51. if (!tas_data->enabled)
  52. return 0;
  53. /* Roll the earliest base time over until it is in a comparable
  54. * time base with the latest, then compare their deltas.
  55. * We want to enforce that all ports' base times are within
  56. * SJA1105_TAS_MAX_DELTA 200ns cycles of one another.
  57. */
  58. earliest_base_time = future_base_time(earliest_base_time,
  59. its_cycle_time,
  60. latest_base_time);
  61. while (earliest_base_time > latest_base_time)
  62. earliest_base_time -= its_cycle_time;
  63. if (latest_base_time - earliest_base_time >
  64. sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
  65. dev_err(ds->dev,
  66. "Base times too far apart: min %llu max %llu\n",
  67. earliest_base_time, latest_base_time);
  68. return -ERANGE;
  69. }
  70. tas_data->earliest_base_time = earliest_base_time;
  71. tas_data->max_cycle_time = max_cycle_time;
  72. dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
  73. dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
  74. dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
  75. return 0;
  76. }
  77. /* Lo and behold: the egress scheduler from hell.
  78. *
  79. * At the hardware level, the Time-Aware Shaper holds a global linear array of
  80. * all schedule entries for all ports. These are the Gate Control List (GCL)
  81. * entries, let's call them "timeslots" for short. This linear array of
  82. * timeslots is held in BLK_IDX_SCHEDULE.
  83. *
  84. * Then there are a maximum of 8 "execution threads" inside the switch, which
  85. * iterate cyclically through the "schedule". Each "cycle" has an entry point
  86. * and an exit point, both being timeslot indices in the schedule table. The
  87. * hardware calls each cycle a "subschedule".
  88. *
  89. * Subschedule (cycle) i starts when
  90. * ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
  91. *
  92. * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
  93. * k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
  94. * k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
  95. *
  96. * For each schedule entry (timeslot) k, the engine executes the gate control
  97. * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
  98. *
  99. * +---------+
  100. * | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
  101. * +---------+
  102. * |
  103. * +-----------------+
  104. * | .actsubsch
  105. * BLK_IDX_SCHEDULE_ENTRY_POINTS v
  106. * +-------+-------+
  107. * |cycle 0|cycle 1|
  108. * +-------+-------+
  109. * | | | |
  110. * +----------------+ | | +-------------------------------------+
  111. * | .subschindx | | .subschindx |
  112. * | | +---------------+ |
  113. * | .address | .address | |
  114. * | | | |
  115. * | | | |
  116. * | BLK_IDX_SCHEDULE v v |
  117. * | +-------+-------+-------+-------+-------+------+ |
  118. * | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
  119. * | +-------+-------+-------+-------+-------+------+ |
  120. * | ^ ^ ^ ^ |
  121. * | | | | | |
  122. * | +-------------------------+ | | | |
  123. * | | +-------------------------------+ | | |
  124. * | | | +-------------------+ | |
  125. * | | | | | |
  126. * | +---------------------------------------------------------------+ |
  127. * | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
  128. * | +---------------------------------------------------------------+ |
  129. * | ^ ^ BLK_IDX_SCHEDULE_PARAMS |
  130. * | | | |
  131. * +--------+ +-------------------------------------------+
  132. *
  133. * In the above picture there are two subschedules (cycles):
  134. *
  135. * - cycle 0: iterates the schedule table from 0 to 2 (and back)
  136. * - cycle 1: iterates the schedule table from 3 to 5 (and back)
  137. *
  138. * All other possible execution threads must be marked as unused by making
  139. * their "subschedule end index" (subscheind) equal to the last valid
  140. * subschedule's end index (in this case 5).
  141. */
/* Rebuild the four scheduler-related static config tables (Schedule,
 * Schedule Entry Points, and their two parameters tables, see the diagram
 * above) from the per-port tc-taprio offloads plus the tc-gate subschedule.
 * Each active schedule becomes one hardware "cycle" (subschedule), each of
 * its GCL entries one timeslot in the linear Schedule table.
 *
 * Returns 0 on success (also when there is nothing to offload), -ENOMEM on
 * allocation failure, or a negative errno propagated from
 * sja1105_tas_set_runtime_params.
 */
int sja1105_init_scheduling(struct sja1105_private *priv)
{
	struct sja1105_schedule_entry_points_entry *schedule_entry_points;
	struct sja1105_schedule_entry_points_params_entry
					*schedule_entry_points_params;
	struct sja1105_schedule_params_entry *schedule_params;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
	struct sja1105_schedule_entry *schedule;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int schedule_start_idx;
	s64 entry_point_delta;
	int schedule_end_idx;
	int num_entries = 0;
	int num_cycles = 0;
	int cycle = 0;
	int i, k = 0;
	int port, rc;

	rc = sja1105_tas_set_runtime_params(priv);
	if (rc < 0)
		return rc;

	/* Discard previous Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Figure out the dimensioning of the problem: one cycle per
	 * offloaded port (plus one for tc-gate), one timeslot per GCL entry.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (tas_data->offload[port]) {
			num_entries += tas_data->offload[port]->num_entries;
			num_cycles++;
		}
	}

	if (!list_empty(&gating_cfg->entries)) {
		num_entries += gating_cfg->num_entries;
		num_cycles++;
	}

	/* Nothing to do */
	if (!num_cycles)
		return 0;

	/* Pre-allocate space in the static config tables */

	/* Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_entries;
	schedule = table->entries;

	/* Schedule Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		/* Previously allocated memory will be freed automatically in
		 * sja1105_static_config_free. This is true for all early
		 * returns below.
		 */
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
	schedule_entry_points_params = table->entries;

	/* Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
	schedule_params = table->entries;

	/* Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_cycles;
	schedule_entry_points = table->entries;

	/* Finally start populating the static config tables */
	schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
	schedule_entry_points_params->actsubsch = num_cycles - 1;

	for (port = 0; port < ds->num_ports; port++) {
		const struct tc_taprio_qopt_offload *offload;
		/* Relative base time */
		s64 rbt;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		schedule_start_idx = k;
		schedule_end_idx = k + offload->num_entries - 1;
		/* This is the base time expressed as a number of TAS ticks
		 * relative to PTPSCHTM, which we'll (perhaps improperly) call
		 * the operational base time.
		 */
		rbt = future_base_time(offload->base_time,
				       offload->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;
		/* UM10944.pdf 4.2.2. Schedule Entry Points table says that
		 * delta cannot be zero, which is shitty. Advance all relative
		 * base times by 1 TAS delta, so that even the earliest base
		 * time becomes 1 in relative terms. Then start the operational
		 * base time (PTPSCHTM) one TAS delta earlier than planned.
		 */
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		/* The subschedule end indices need to be
		 * monotonically increasing.
		 */
		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		for (i = 0; i < offload->num_entries; i++, k++) {
			s64 delta_ns = offload->entries[i].interval;

			schedule[k].delta = ns_to_sja1105_delta(delta_ns);
			schedule[k].destports = BIT(port);
			schedule[k].resmedia_en = true;
			/* resmedia is set to the complement of the entry's
			 * gate_mask, limited to the switch's traffic classes
			 */
			schedule[k].resmedia = SJA1105_GATE_MASK &
					~offload->entries[i].gate_mask;
		}
		cycle++;
	}

	if (!list_empty(&gating_cfg->entries)) {
		struct sja1105_gate_entry *e;

		/* Relative base time */
		s64 rbt;

		schedule_start_idx = k;
		schedule_end_idx = k + gating_cfg->num_entries - 1;
		rbt = future_base_time(gating_cfg->base_time,
				       gating_cfg->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;
		/* Same +1 adjustment as for the per-port cycles above */
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		list_for_each_entry(e, &gating_cfg->entries, list) {
			schedule[k].delta = ns_to_sja1105_delta(e->interval);
			schedule[k].destports = e->rule->vl.destports;
			schedule[k].setvalid = true;
			schedule[k].txen = true;
			schedule[k].vlindex = e->rule->vl.sharindx;
			schedule[k].winstindex = e->rule->vl.sharindx;
			if (e->gate_state) /* Gate open */
				schedule[k].winst = true;
			else /* Gate closed */
				schedule[k].winend = true;
			k++;
		}
	}

	return 0;
}
  316. /* Be there 2 port subschedules, each executing an arbitrary number of gate
  317. * open/close events cyclically.
  318. * None of those gate events must ever occur at the exact same time, otherwise
  319. * the switch is known to act in exotically strange ways.
  320. * However the hardware doesn't bother performing these integrity checks.
  321. * So here we are with the task of validating whether the new @admin offload
  322. * has any conflict with the already established TAS configuration in
  323. * tas_data->offload. We already know the other ports are in harmony with one
  324. * another, otherwise we wouldn't have saved them.
  325. * Each gate event executes periodically, with a period of @cycle_time and a
  326. * phase given by its cycle's @base_time plus its offset within the cycle
  327. * (which in turn is given by the length of the events prior to it).
  328. * There are two aspects to possible collisions:
  329. * - Collisions within one cycle's (actually the longest cycle's) time frame.
  330. * For that, we need to compare the cartesian product of each possible
  331. * occurrence of each event within one cycle time.
  332. * - Collisions in the future. Events may not collide within one cycle time,
  333. * but if two port schedules don't have the same periodicity (aka the cycle
  334. * times aren't multiples of one another), they surely will some time in the
  335. * future (actually they will collide an infinite amount of times).
  336. */
/* Returns true if any gate event of @admin can ever fire at the exact same
 * time as a gate event of the already-offloaded schedule on @port (see the
 * discussion above), false otherwise. Also returns false trivially when
 * @port has no offloaded schedule.
 */
static bool
sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
			    const struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	const struct tc_taprio_qopt_offload *offload;
	s64 max_cycle_time, min_cycle_time;
	s64 delta1, delta2;
	s64 rbt1, rbt2;
	s64 stop_time;
	s64 t1, t2;
	int i, j;
	s32 rem;

	offload = tas_data->offload[port];
	if (!offload)
		return false;

	/* Check if the two cycle times are multiples of one another.
	 * If they aren't, then they will surely collide.
	 */
	max_cycle_time = max(offload->cycle_time, admin->cycle_time);
	min_cycle_time = min(offload->cycle_time, admin->cycle_time);
	div_s64_rem(max_cycle_time, min_cycle_time, &rem);
	if (rem)
		return true;

	/* Calculate the "reduced" base time of each of the two cycles
	 * (transposed back as close to 0 as possible) by dividing to
	 * the cycle time.
	 */
	div_s64_rem(offload->base_time, offload->cycle_time, &rem);
	rbt1 = rem;

	div_s64_rem(admin->base_time, admin->cycle_time, &rem);
	rbt2 = rem;

	/* Past this point the occurrence pattern repeats, so searching
	 * up to here covers all distinct collision opportunities.
	 */
	stop_time = max_cycle_time + max(rbt1, rbt2);

	/* delta1 is the relative base time of each GCL entry within
	 * the established ports' TAS config.
	 */
	for (i = 0, delta1 = 0;
	     i < offload->num_entries;
	     delta1 += offload->entries[i].interval, i++) {
		/* delta2 is the relative base time of each GCL entry
		 * within the newly added TAS config.
		 */
		for (j = 0, delta2 = 0;
		     j < admin->num_entries;
		     delta2 += admin->entries[j].interval, j++) {
			/* t1 follows all possible occurrences of the
			 * established ports' GCL entry i within the
			 * first cycle time.
			 */
			for (t1 = rbt1 + delta1;
			     t1 <= stop_time;
			     t1 += offload->cycle_time) {
				/* t2 follows all possible occurrences
				 * of the newly added GCL entry j
				 * within the first cycle time.
				 */
				for (t2 = rbt2 + delta2;
				     t2 <= stop_time;
				     t2 += admin->cycle_time) {
					if (t1 == t2) {
						dev_warn(priv->ds->dev,
							 "GCL entry %d collides with entry %d of port %d\n",
							 j, i, port);
						return true;
					}
				}
			}
		}
	}

	return false;
}
  408. /* Check the tc-taprio configuration on @port for conflicts with the tc-gate
  409. * global subschedule. If @port is -1, check it against all ports.
  410. * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
  411. * convert the gating configuration to a dummy tc-taprio offload structure.
  412. */
  413. bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
  414. struct netlink_ext_ack *extack)
  415. {
  416. struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
  417. size_t num_entries = gating_cfg->num_entries;
  418. struct tc_taprio_qopt_offload *dummy;
  419. struct dsa_switch *ds = priv->ds;
  420. struct sja1105_gate_entry *e;
  421. bool conflict;
  422. int i = 0;
  423. if (list_empty(&gating_cfg->entries))
  424. return false;
  425. dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
  426. if (!dummy) {
  427. NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
  428. return true;
  429. }
  430. dummy->num_entries = num_entries;
  431. dummy->base_time = gating_cfg->base_time;
  432. dummy->cycle_time = gating_cfg->cycle_time;
  433. list_for_each_entry(e, &gating_cfg->entries, list)
  434. dummy->entries[i++].interval = e->interval;
  435. if (port != -1) {
  436. conflict = sja1105_tas_check_conflicts(priv, port, dummy);
  437. } else {
  438. for (port = 0; port < ds->num_ports; port++) {
  439. conflict = sja1105_tas_check_conflicts(priv, port,
  440. dummy);
  441. if (conflict)
  442. break;
  443. }
  444. }
  445. kfree(dummy);
  446. return conflict;
  447. }
/* tc-taprio offload entry point for one port. @admin->enable selects
 * between installing a new schedule and deleting the current one; either
 * way, the scheduler static config is rebuilt and reloaded into hardware.
 * Returns 0 on success or a negative errno.
 */
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
			    struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	int other_port, rc, i;

	/* Can't change an already configured port (must delete qdisc first).
	 * Can't delete the qdisc from an unconfigured port.
	 */
	if (!!tas_data->offload[port] == admin->enable)
		return -EINVAL;

	if (!admin->enable) {
		/* Drop the reference taken below via taprio_offload_get */
		taprio_offload_free(tas_data->offload[port]);
		tas_data->offload[port] = NULL;

		rc = sja1105_init_scheduling(priv);
		if (rc < 0)
			return rc;

		return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
	}

	/* The cycle time extension is the amount of time the last cycle from
	 * the old OPER needs to be extended in order to phase-align with the
	 * base time of the ADMIN when that becomes the new OPER.
	 * But of course our switch needs to be reset to switch-over between
	 * the ADMIN and the OPER configs - so much for a seamless transition.
	 * So don't add insult over injury and just say we don't support cycle
	 * time extension.
	 */
	if (admin->cycle_time_extension)
		return -ENOTSUPP;

	/* Validate that every interval fits in the hardware delta field:
	 * non-zero and below SJA1105_TAS_MAX_DELTA (in 200ns delta units).
	 */
	for (i = 0; i < admin->num_entries; i++) {
		s64 delta_ns = admin->entries[i].interval;
		s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
		bool too_long, too_short;

		too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
		too_short = (delta_cycles == 0);
		if (too_long || too_short) {
			dev_err(priv->ds->dev,
				"Interval %llu too %s for GCL entry %d\n",
				delta_ns, too_long ? "long" : "short", i);
			return -ERANGE;
		}
	}

	/* Reject schedules that would collide with any other port's */
	for (other_port = 0; other_port < ds->num_ports; other_port++) {
		if (other_port == port)
			continue;

		if (sja1105_tas_check_conflicts(priv, other_port, admin))
			return -ERANGE;
	}

	if (sja1105_gating_check_conflicts(priv, port, NULL)) {
		dev_err(ds->dev, "Conflict with tc-gate schedule\n");
		return -ERANGE;
	}

	/* Hold a reference on the offload; released in the !enable branch
	 * above or in sja1105_tas_teardown.
	 */
	tas_data->offload[port] = taprio_offload_get(admin);

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}
  506. static int sja1105_tas_check_running(struct sja1105_private *priv)
  507. {
  508. struct sja1105_tas_data *tas_data = &priv->tas_data;
  509. struct dsa_switch *ds = priv->ds;
  510. struct sja1105_ptp_cmd cmd = {0};
  511. int rc;
  512. rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
  513. if (rc < 0)
  514. return rc;
  515. if (cmd.ptpstrtsch == 1)
  516. /* Schedule successfully started */
  517. tas_data->state = SJA1105_TAS_STATE_RUNNING;
  518. else if (cmd.ptpstopsch == 1)
  519. /* Schedule is stopped */
  520. tas_data->state = SJA1105_TAS_STATE_DISABLED;
  521. else
  522. /* Schedule is probably not configured with PTP clock source */
  523. rc = -EINVAL;
  524. return rc;
  525. }
  526. /* Write to PTPCLKCORP */
  527. static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
  528. u64 correction)
  529. {
  530. const struct sja1105_regs *regs = priv->info->regs;
  531. u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
  532. return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
  533. &ptpclkcorp, NULL);
  534. }
  535. /* Write to PTPSCHTM */
  536. static int sja1105_tas_set_base_time(struct sja1105_private *priv,
  537. u64 base_time)
  538. {
  539. const struct sja1105_regs *regs = priv->info->regs;
  540. u64 ptpschtm = ns_to_sja1105_ticks(base_time);
  541. return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
  542. &ptpschtm, NULL);
  543. }
  544. static int sja1105_tas_start(struct sja1105_private *priv)
  545. {
  546. struct sja1105_tas_data *tas_data = &priv->tas_data;
  547. struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
  548. struct dsa_switch *ds = priv->ds;
  549. int rc;
  550. dev_dbg(ds->dev, "Starting the TAS\n");
  551. if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
  552. tas_data->state == SJA1105_TAS_STATE_RUNNING) {
  553. dev_err(ds->dev, "TAS already started\n");
  554. return -EINVAL;
  555. }
  556. cmd->ptpstrtsch = 1;
  557. cmd->ptpstopsch = 0;
  558. rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
  559. if (rc < 0)
  560. return rc;
  561. tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
  562. return 0;
  563. }
  564. static int sja1105_tas_stop(struct sja1105_private *priv)
  565. {
  566. struct sja1105_tas_data *tas_data = &priv->tas_data;
  567. struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
  568. struct dsa_switch *ds = priv->ds;
  569. int rc;
  570. dev_dbg(ds->dev, "Stopping the TAS\n");
  571. if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
  572. dev_err(ds->dev, "TAS already disabled\n");
  573. return -EINVAL;
  574. }
  575. cmd->ptpstopsch = 1;
  576. cmd->ptpstrtsch = 0;
  577. rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
  578. if (rc < 0)
  579. return rc;
  580. tas_data->state = SJA1105_TAS_STATE_DISABLED;
  581. return 0;
  582. }
  583. /* The schedule engine and the PTP clock are driven by the same oscillator, and
  584. * they run in parallel. But whilst the PTP clock can keep an absolute
  585. * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
  586. * up a delta, which is 200ns), and wrapping around at the end of each cycle.
  587. * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
  588. * (in PTP domain).
  589. * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
  590. * a software servo, and the schedule engine clock runs in parallel to the PTP
  591. * clock, there is logic internal to the switch that periodically keeps the
  592. * schedule engine from drifting away. The frequency with which this internal
  593. * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
  594. * a value also in the PTP clock domain, and is also rate-corrected.
  595. * To be precise, during a correction period, there is logic to determine by
  596. * how many scheduler clock ticks has the PTP clock drifted. At the end of each
  597. * correction period/beginning of new one, the length of a delta is shrunk or
  598. * expanded with an integer number of ticks, compared with the typical 25.
  599. * So a delta lasts for 200ns (or 25 ticks) only on average.
  600. * Sometimes it is longer, sometimes it is shorter. The internal syntonization
  601. * logic can adjust for at most 5 ticks each 20 ticks.
  602. *
  603. * The first implication is that you should choose your schedule correction
  604. * period to be an integer multiple of the schedule length. Preferably one.
  605. * In case there are schedules of multiple ports active, then the correction
  606. * period needs to be a multiple of them all. Given the restriction that the
  607. * cycle times have to be multiples of one another anyway, this means the
  608. * correction period can simply be the largest cycle time, hence the current
  609. * choice. This way, the updates are always synchronous to the transmission
  610. * cycle, and therefore predictable.
  611. *
  612. * The second implication is that at the beginning of a correction period, the
  613. * first few deltas will be modulated in time, until the schedule engine is
  614. * properly phase-aligned with the PTP clock. For this reason, you should place
  615. * your best-effort traffic at the beginning of a cycle, and your
  616. * time-triggered traffic afterwards.
  617. *
  618. * The third implication is that once the schedule engine is started, it can
  619. * only adjust for so much drift within a correction period. In the servo you
  620. * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
  621. * want to do the latter, you need to stop and restart the schedule engine,
  622. * which is what the state machine handles.
  623. */
/* Deferred work driving the TAS start/stop state machine, kicked by the
 * PTP notifications (sja1105_tas_clockstep / sja1105_tas_adjfreq).
 * See the large comment above for the hardware background.
 */
static void sja1105_tas_state_machine(struct work_struct *work)
{
	struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
	struct sja1105_private *priv = tas_to_sja1105(tas_data);
	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
	struct timespec64 base_time_ts, now_ts;
	struct dsa_switch *ds = priv->ds;
	struct timespec64 diff;
	s64 base_time, now;
	int rc = 0;

	mutex_lock(&ptp_data->lock);

	switch (tas_data->state) {
	case SJA1105_TAS_STATE_DISABLED:
		/* Can't do anything at all if clock is still being stepped */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
			break;

		/* Correction period = longest cycle time (see comment above) */
		rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
		if (rc < 0)
			break;

		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		/* Plan to start the earliest schedule first. The others
		 * will be started in hardware, by way of their respective
		 * entry points delta.
		 * Try our best to avoid fringe cases (race condition between
		 * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
		 * least one second in the future from now. This is not ideal,
		 * but this only needs to buy us time until the
		 * sja1105_tas_start command below gets executed.
		 */
		base_time = future_base_time(tas_data->earliest_base_time,
					     tas_data->max_cycle_time,
					     now + 1ull * NSEC_PER_SEC);
		/* Compensate for the +1 added to all entry point deltas
		 * in sja1105_init_scheduling by starting PTPSCHTM one TAS
		 * delta earlier.
		 */
		base_time -= sja1105_delta_to_ns(1);

		rc = sja1105_tas_set_base_time(priv, base_time);
		if (rc < 0)
			break;

		tas_data->oper_base_time = base_time;

		rc = sja1105_tas_start(priv);
		if (rc < 0)
			break;

		base_time_ts = ns_to_timespec64(base_time);
		now_ts = ns_to_timespec64(now);

		dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
			base_time_ts.tv_sec, base_time_ts.tv_nsec,
			now_ts.tv_sec, now_ts.tv_nsec);

		break;

	case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			/* Clock was stepped.. bad news for TAS */
			sja1105_tas_stop(priv);
			break;
		}

		/* Check if TAS has actually started, by comparing the
		 * scheduled start time with the SJA1105 PTP clock
		 */
		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		if (now < tas_data->oper_base_time) {
			/* TAS has not started yet */
			diff = ns_to_timespec64(tas_data->oper_base_time - now);
			dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
				diff.tv_sec, diff.tv_nsec);
			break;
		}

		/* Time elapsed, what happened? */
		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			/* The schedule should be running by now */
			dev_err(ds->dev,
				"TAS not started despite time elapsed\n");

		break;

	case SJA1105_TAS_STATE_RUNNING:
		/* Clock was stepped.. bad news for TAS */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			sja1105_tas_stop(priv);
			break;
		}

		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			dev_err(ds->dev, "TAS surprisingly stopped\n");

		break;

	default:
		if (net_ratelimit())
			dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
	}

	if (rc && net_ratelimit())
		dev_err(ds->dev, "An operation returned %d\n", rc);

	mutex_unlock(&ptp_data->lock);
}
  720. void sja1105_tas_clockstep(struct dsa_switch *ds)
  721. {
  722. struct sja1105_private *priv = ds->priv;
  723. struct sja1105_tas_data *tas_data = &priv->tas_data;
  724. if (!tas_data->enabled)
  725. return;
  726. tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
  727. schedule_work(&tas_data->tas_work);
  728. }
  729. void sja1105_tas_adjfreq(struct dsa_switch *ds)
  730. {
  731. struct sja1105_private *priv = ds->priv;
  732. struct sja1105_tas_data *tas_data = &priv->tas_data;
  733. if (!tas_data->enabled)
  734. return;
  735. /* No reason to schedule the workqueue, nothing changed */
  736. if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
  737. return;
  738. tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
  739. schedule_work(&tas_data->tas_work);
  740. }
  741. void sja1105_tas_setup(struct dsa_switch *ds)
  742. {
  743. struct sja1105_private *priv = ds->priv;
  744. struct sja1105_tas_data *tas_data = &priv->tas_data;
  745. INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
  746. tas_data->state = SJA1105_TAS_STATE_DISABLED;
  747. tas_data->last_op = SJA1105_PTP_NONE;
  748. INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
  749. }
  750. void sja1105_tas_teardown(struct dsa_switch *ds)
  751. {
  752. struct sja1105_private *priv = ds->priv;
  753. struct tc_taprio_qopt_offload *offload;
  754. int port;
  755. cancel_work_sync(&priv->tas_data.tas_work);
  756. for (port = 0; port < ds->num_ports; port++) {
  757. offload = priv->tas_data.offload[port];
  758. if (!offload)
  759. continue;
  760. taprio_offload_free(offload);
  761. }
  762. }