am65-cpsw-switchdev.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534
// SPDX-License-Identifier: GPL-2.0
  2. /* Texas Instruments K3 AM65 Ethernet Switchdev Driver
  3. *
  4. * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
  5. *
  6. */
  7. #include <linux/etherdevice.h>
  8. #include <linux/if_bridge.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/workqueue.h>
  11. #include <net/switchdev.h>
  12. #include "am65-cpsw-nuss.h"
  13. #include "am65-cpsw-switchdev.h"
  14. #include "cpsw_ale.h"
/* Deferred FDB add/del request. Allocated in the atomic switchdev
 * notifier and processed (then freed) by am65_cpsw_switchdev_event_work().
 */
struct am65_cpsw_switchdev_event_work {
	struct work_struct work;	/* queued on system_long_wq */
	/* Copy of the notifier payload; fdb_info.addr is a separately
	 * kzalloc'ed MAC buffer owned by this struct (kfree'd by the worker).
	 */
	struct switchdev_notifier_fdb_info fdb_info;
	struct am65_cpsw_port *port;	/* port the event targets */
	unsigned long event;		/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
};
  21. static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
  22. {
  23. struct am65_cpsw_common *cpsw = port->common;
  24. u8 cpsw_state;
  25. int ret = 0;
  26. switch (state) {
  27. case BR_STATE_FORWARDING:
  28. cpsw_state = ALE_PORT_STATE_FORWARD;
  29. break;
  30. case BR_STATE_LEARNING:
  31. cpsw_state = ALE_PORT_STATE_LEARN;
  32. break;
  33. case BR_STATE_DISABLED:
  34. cpsw_state = ALE_PORT_STATE_DISABLE;
  35. break;
  36. case BR_STATE_LISTENING:
  37. case BR_STATE_BLOCKING:
  38. cpsw_state = ALE_PORT_STATE_BLOCK;
  39. break;
  40. default:
  41. return -EOPNOTSUPP;
  42. }
  43. ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
  44. ALE_PORT_STATE, cpsw_state);
  45. netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);
  46. return ret;
  47. }
  48. static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
  49. struct net_device *orig_dev,
  50. struct switchdev_brport_flags flags)
  51. {
  52. struct am65_cpsw_common *cpsw = port->common;
  53. if (flags.mask & BR_MCAST_FLOOD) {
  54. bool unreg_mcast_add = false;
  55. if (flags.val & BR_MCAST_FLOOD)
  56. unreg_mcast_add = true;
  57. netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
  58. unreg_mcast_add, port->port_id);
  59. cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
  60. unreg_mcast_add);
  61. }
  62. return 0;
  63. }
  64. static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
  65. struct switchdev_brport_flags flags)
  66. {
  67. if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
  68. return -EINVAL;
  69. return 0;
  70. }
  71. static int am65_cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
  72. const struct switchdev_attr *attr,
  73. struct netlink_ext_ack *extack)
  74. {
  75. struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
  76. int ret;
  77. netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);
  78. switch (attr->id) {
  79. case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
  80. ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
  81. attr->u.brport_flags);
  82. break;
  83. case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
  84. ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
  85. netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
  86. break;
  87. case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
  88. ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
  89. attr->u.brport_flags);
  90. break;
  91. default:
  92. ret = -EOPNOTSUPP;
  93. break;
  94. }
  95. return ret;
  96. }
  97. static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
  98. {
  99. struct am65_cpsw_common *cpsw = port->common;
  100. struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
  101. u32 pvid;
  102. if (port->port_id)
  103. pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
  104. else
  105. pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
  106. pvid = pvid & 0xfff;
  107. return pvid;
  108. }
  109. static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
  110. {
  111. struct am65_cpsw_common *cpsw = port->common;
  112. struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
  113. u32 pvid;
  114. pvid = vid;
  115. pvid |= cfi ? BIT(12) : 0;
  116. pvid |= (cos & 0x7) << 13;
  117. if (port->port_id)
  118. writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
  119. else
  120. writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
  121. }
/* Add @vid to the ALE VLAN table for one port (or the host port when
 * @orig_dev is the bridge master itself) and optionally make it the PVID.
 *
 * @untag: egress untagged on this port; @pvid: also program the port's
 * default VLAN. Returns 0 on success or the ALE error code.
 */
static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
				   u16 vid, struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int unreg_mcast_mask = 0;
	int reg_mcast_mask = 0;
	int untag_mask = 0;
	int port_mask;
	int ret = 0;
	u32 flags;

	if (cpu_port) {
		/* Host port: flood unregistered multicast towards the CPU */
		port_mask = BIT(HOST_PORT_NUM);
		flags = orig_dev->flags;
		unreg_mcast_mask = port_mask;
	} else {
		port_mask = BIT(port->port_id);
		flags = port->ndev->flags;
	}

	/* Forward registered multicast only if the device does multicast */
	if (flags & IFF_MULTICAST)
		reg_mcast_mask = port_mask;

	if (untag)
		untag_mask = port_mask;

	ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
				       reg_mcast_mask, unreg_mcast_mask);
	if (ret) {
		netdev_err(port->ndev, "Unable to add vlan\n");
		return ret;
	}

	/* Give the CPU port a secure unicast entry for the port MAC in @vid */
	if (cpu_port)
		cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
	if (!pvid)
		return ret;

	am65_cpsw_set_pvid(port, vid, 0, 0);

	netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}
/* Remove @vid from one port's (or the host port's, when @orig_dev is the
 * bridge master) VLAN membership and clean up the related ALE entries.
 */
static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
				   struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int ret = 0;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
	if (ret != 0)
		return ret;

	/* We don't care for the return value here, error is returned only if
	 * the unicast entry is not present
	 */
	if (cpu_port)
		cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN, vid);

	/* If the removed VLAN was the port's PVID, clear the PVID as well */
	if (vid == am65_cpsw_get_pvid(port))
		am65_cpsw_set_pvid(port, 0, 0, 0);

	/* We don't care for the return value here, error is returned only if
	 * the multicast entry is not present
	 */
	cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
			   ALE_VLAN, vid);
	netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}
  192. static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
  193. const struct switchdev_obj_port_vlan *vlan)
  194. {
  195. bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  196. struct net_device *orig_dev = vlan->obj.orig_dev;
  197. bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
  198. netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
  199. port->ndev->name, vlan->vid, vlan->flags);
  200. return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
  201. }
  202. static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
  203. const struct switchdev_obj_port_vlan *vlan)
  204. {
  205. return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
  206. }
  207. static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
  208. struct switchdev_obj_port_mdb *mdb)
  209. {
  210. struct net_device *orig_dev = mdb->obj.orig_dev;
  211. bool cpu_port = netif_is_bridge_master(orig_dev);
  212. struct am65_cpsw_common *cpsw = port->common;
  213. int port_mask;
  214. int err;
  215. if (cpu_port)
  216. port_mask = BIT(HOST_PORT_NUM);
  217. else
  218. port_mask = BIT(port->port_id);
  219. err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
  220. ALE_VLAN, mdb->vid, 0);
  221. netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
  222. port->ndev->name, mdb->vid, mdb->addr, port_mask);
  223. return err;
  224. }
  225. static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
  226. struct switchdev_obj_port_mdb *mdb)
  227. {
  228. struct net_device *orig_dev = mdb->obj.orig_dev;
  229. bool cpu_port = netif_is_bridge_master(orig_dev);
  230. struct am65_cpsw_common *cpsw = port->common;
  231. int del_mask;
  232. if (cpu_port)
  233. del_mask = BIT(HOST_PORT_NUM);
  234. else
  235. del_mask = BIT(port->port_id);
  236. /* Ignore error as error code is returned only when entry is already removed */
  237. cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
  238. ALE_VLAN, mdb->vid);
  239. netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
  240. port->ndev->name, mdb->vid, mdb->addr, del_mask);
  241. return 0;
  242. }
  243. static int am65_cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
  244. const struct switchdev_obj *obj,
  245. struct netlink_ext_ack *extack)
  246. {
  247. struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
  248. struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
  249. struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
  250. int err = 0;
  251. netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);
  252. switch (obj->id) {
  253. case SWITCHDEV_OBJ_ID_PORT_VLAN:
  254. err = am65_cpsw_port_vlans_add(port, vlan);
  255. break;
  256. case SWITCHDEV_OBJ_ID_PORT_MDB:
  257. case SWITCHDEV_OBJ_ID_HOST_MDB:
  258. err = am65_cpsw_port_mdb_add(port, mdb);
  259. break;
  260. default:
  261. err = -EOPNOTSUPP;
  262. break;
  263. }
  264. return err;
  265. }
  266. static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
  267. const struct switchdev_obj *obj)
  268. {
  269. struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
  270. struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
  271. struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
  272. int err = 0;
  273. netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);
  274. switch (obj->id) {
  275. case SWITCHDEV_OBJ_ID_PORT_VLAN:
  276. err = am65_cpsw_port_vlans_del(port, vlan);
  277. break;
  278. case SWITCHDEV_OBJ_ID_PORT_MDB:
  279. case SWITCHDEV_OBJ_ID_HOST_MDB:
  280. err = am65_cpsw_port_mdb_del(port, mdb);
  281. break;
  282. default:
  283. err = -EOPNOTSUPP;
  284. break;
  285. }
  286. return err;
  287. }
  288. static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
  289. struct switchdev_notifier_fdb_info *rcv)
  290. {
  291. struct switchdev_notifier_fdb_info info = {};
  292. info.addr = rcv->addr;
  293. info.vid = rcv->vid;
  294. info.offloaded = true;
  295. call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
  296. ndev, &info.info, NULL);
  297. }
/* Worker for deferred FDB add/del events. Runs in process context so it
 * can take the RTNL lock before touching the ALE table, then releases
 * everything the notifier allocated/held.
 */
static void am65_cpsw_switchdev_event_work(struct work_struct *work)
{
	struct am65_cpsw_switchdev_event_work *switchdev_work =
		container_of(work, struct am65_cpsw_switchdev_event_work, work);
	struct am65_cpsw_port *port = switchdev_work->port;
	struct switchdev_notifier_fdb_info *fdb;
	struct am65_cpsw_common *cpsw = port->common;
	int port_id = port->port_id;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		/* Only offload entries explicitly added by the user */
		if (!fdb->added_by_user || fdb->is_local)
			break;
		/* An entry for the port's own MAC belongs on the host port */
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		/* Report the entry back to the bridge as offloaded */
		am65_cpsw_fdb_offload_notify(port->ndev, fdb);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user || fdb->is_local)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		break;
	default:
		break;
	}
	rtnl_unlock();

	/* Release the resources taken in am65_cpsw_switchdev_event() */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(port->ndev);
}
/* called under rcu_read_lock() */
/* Atomic switchdev notifier: handles PORT_ATTR_SET synchronously and
 * defers FDB add/del to a workqueue (ALE updates need the RTNL lock,
 * which cannot be taken in atomic context).
 */
static int am65_cpsw_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
	struct am65_cpsw_switchdev_event_work *switchdev_work;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(ndev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!am65_cpsw_port_dev_check(ndev))
		return NOTIFY_DONE;

	/* Atomic (RCU) context: GFP_ATOMIC allocations only */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
	switchdev_work->port = port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Deep-copy the MAC: the notifier payload is not valid once
		 * this callback returns. Freed by the worker.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Keep the netdev alive until the worker runs (dev_put there) */
		dev_hold(ndev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(system_long_wq, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
/* Atomic notifier: FDB add/del and port attribute events */
static struct notifier_block cpsw_switchdev_notifier = {
	.notifier_call = am65_cpsw_switchdev_event,
};
  389. static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
  390. unsigned long event, void *ptr)
  391. {
  392. struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
  393. int err;
  394. switch (event) {
  395. case SWITCHDEV_PORT_OBJ_ADD:
  396. err = switchdev_handle_port_obj_add(dev, ptr,
  397. am65_cpsw_port_dev_check,
  398. am65_cpsw_port_obj_add);
  399. return notifier_from_errno(err);
  400. case SWITCHDEV_PORT_OBJ_DEL:
  401. err = switchdev_handle_port_obj_del(dev, ptr,
  402. am65_cpsw_port_dev_check,
  403. am65_cpsw_port_obj_del);
  404. return notifier_from_errno(err);
  405. case SWITCHDEV_PORT_ATTR_SET:
  406. err = switchdev_handle_port_attr_set(dev, ptr,
  407. am65_cpsw_port_dev_check,
  408. am65_cpsw_port_attr_set);
  409. return notifier_from_errno(err);
  410. default:
  411. break;
  412. }
  413. return NOTIFY_DONE;
  414. }
/* Blocking notifier: port object (VLAN/MDB) and attribute events */
static struct notifier_block cpsw_switchdev_bl_notifier = {
	.notifier_call = am65_cpsw_switchdev_blocking_event,
};
  418. int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
  419. {
  420. int ret = 0;
  421. ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
  422. if (ret) {
  423. dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
  424. ret);
  425. return ret;
  426. }
  427. ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
  428. if (ret) {
  429. dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
  430. ret);
  431. unregister_switchdev_notifier(&cpsw_switchdev_notifier);
  432. }
  433. return ret;
  434. }
  435. void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
  436. {
  437. unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
  438. unregister_switchdev_notifier(&cpsw_switchdev_notifier);
  439. }