mtk_ppe.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <[email protected]> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

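/* Compute the FOE table bucket for a prepared entry. The relevant tuple
 * words are mixed, rotated and XOR-folded, then scaled by the per-SoC
 * hash_offset and masked to the table size. Only IPv4 route/HNAPT and
 * IPv6 route entries are hashable; other packet types warn and return
 * MTK_PPE_HASH_MASK.
 */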
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

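/* Initialize a software FOE entry in the BIND state for the given packet
 * type, L4 protocol, PSE destination port and MAC addresses. The ib1/ib2
 * field layout differs between NETSYS v1 and v2 SoCs, so the encoding is
 * selected from eth->soc->caps. For route-only (3-tuple) types the unused
 * ports word is filled with an 0xa5a5a5xx pad carrying the L4 protocol.
 */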
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}

	*ib2 = val;

	return 0;
}

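/* Fill in the IPv4 address/port tuple of an entry. For HNAPT entries the
 * egress flag selects the translated (new) tuple instead of the original
 * one; 6RD entries record only the IPv4 tunnel endpoints here, and plain
 * route entries skip the port fields.
 */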
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);

	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

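/* Add a VLAN tag to the entry. The first tag is stored in vlan1; if the
 * single VLAN layer is already taken (presumably by a DSA special tag,
 * which does not set the tag bit), vlan1 is still used and bit 8 of etype
 * marks the tagged frame. A second tag goes into vlan2; more than two
 * layers are rejected with -ENOSPC.
 */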
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

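/* Compare a software flow entry with a hardware FOE entry. Only the UDP
 * bit of ib1 and the type-dependent tuple portion of the entry data are
 * compared; ib2 and the remaining ib1 state/timestamp bits are ignored.
 */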
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

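/* Detach a flow entry from the PPE; the caller holds ppe_lock. L2 entries
 * are removed from the l2_flows rhashtable along with all their subflows.
 * A bound hardware entry is invalidated in place and the PPE cache is
 * cleared. L2 subflow entries are freed here, since they are allocated by
 * the PPE code itself.
 */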
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
		mtk_ppe_cache_clear(ppe);
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

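/* Idle time of an entry in hardware timestamp units: the distance between
 * the current timestamp and the one stored in ib1, accounting for counter
 * wrap-around.
 */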
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

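/* Write a software FOE entry into the hardware table at @hash. The data
 * words are written before ib1, with a barrier in between, so the entry
 * only becomes valid (BIND state, fresh timestamp) once it is fully
 * populated; the PPE cache is cleared afterwards.
 */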
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

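/* Create a hardware flow entry for one flow that matched an L2 (bridge)
 * entry: clone the unbound hardware entry at @hash, overlay the L2
 * template and ib2 from the bridge entry, and bind it. The allocated
 * subflow record is linked into both the per-bucket list and the parent
 * entry's l2_flows list, so it is aged and cleared with its parent.
 */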
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

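/* Called from the RX path for packets that carry a valid FOE hash. If the
 * hardware entry is not yet bound, try to bind a matching software flow
 * entry from the corresponding bucket; failing that, fall back to an L2
 * (bridge) lookup keyed on MAC addresses and VLAN (parsing the MTK DSA
 * special tag when present) and create a subflow from the result.
 */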
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

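/* Allocate and set up one PPE instance: the DMA-coherent FOE table, the
 * per-bucket software flow lists and the l2_flows rhashtable, plus its
 * debugfs entry. Most resources are devm-managed; the rhashtable is
 * destroyed explicitly on the error path and in mtk_ppe_deinit().
 */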
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

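/* Program the PPE registers and enable the engine: point the hardware at
 * the FOE table, set up table size, hashing, aging and binding parameters
 * as well as the flow types to offload, and enable the entry cache. A few
 * additional fields are programmed on NETSYS v2 SoCs.
 */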
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}