xdp.h

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xdp

#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XDP_H

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>
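
/*
 * Note: the include guard above deliberately permits re-reading this
 * header. When TRACE_HEADER_MULTI_READ is defined, the trace
 * infrastructure parses the file again so the TRACE_EVENT() macros can
 * expand differently on each pass (see the include at the end of this
 * file).
 */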

#define __XDP_ACT_MAP(FN) \
        FN(ABORTED)       \
        FN(DROP)          \
        FN(PASS)          \
        FN(TX)            \
        FN(REDIRECT)

#define __XDP_ACT_TP_FN(x) \
        TRACE_DEFINE_ENUM(XDP_##x);
#define __XDP_ACT_SYM_FN(x) \
        { XDP_##x, #x },
#define __XDP_ACT_SYM_TAB \
        __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
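
/*
 * For illustration: __XDP_ACT_SYM_TAB expands (roughly) to
 *
 *   { XDP_ABORTED, "ABORTED" }, { XDP_DROP, "DROP" }, { XDP_PASS, "PASS" },
 *   { XDP_TX, "TX" }, { XDP_REDIRECT, "REDIRECT" }, { -1, NULL }
 *
 * which is the table __print_symbolic() uses below to render the numeric
 * verdict in "act" as a human-readable action name.
 */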

TRACE_EVENT(xdp_exception,

        TP_PROTO(const struct net_device *dev,
                 const struct bpf_prog *xdp, u32 act),

        TP_ARGS(dev, xdp, act),

        TP_STRUCT__entry(
                __field(int, prog_id)
                __field(u32, act)
                __field(int, ifindex)
        ),

        TP_fast_assign(
                __entry->prog_id = xdp->aux->id;
                __entry->act = act;
                __entry->ifindex = dev->ifindex;
        ),

        TP_printk("prog_id=%d action=%s ifindex=%d",
                  __entry->prog_id,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->ifindex)
);
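
/*
 * Sketch of typical driver usage (illustrative only; exact code and the
 * bpf_warn_invalid_xdp_action() signature vary by driver and kernel
 * version, and the XDP_PASS/TX/REDIRECT handling is elided here). A
 * driver fires this tracepoint for aborted or unknown verdicts before
 * dropping the frame:
 *
 *	switch (act) {
 *	...
 *	default:
 *		bpf_warn_invalid_xdp_action(act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *		trace_xdp_exception(dev, prog, act);
 *		fallthrough;
 *	case XDP_DROP:
 *		// free or recycle the frame
 *		break;
 *	}
 */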

TRACE_EVENT(xdp_bulk_tx,

        TP_PROTO(const struct net_device *dev,
                 int sent, int drops, int err),

        TP_ARGS(dev, sent, drops, err),

        TP_STRUCT__entry(
                __field(int, ifindex)
                __field(u32, act)
                __field(int, drops)
                __field(int, sent)
                __field(int, err)
        ),

        TP_fast_assign(
                __entry->ifindex = dev->ifindex;
                __entry->act = XDP_TX;
                __entry->drops = drops;
                __entry->sent = sent;
                __entry->err = err;
        ),

        TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
                  __entry->ifindex,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->sent, __entry->drops, __entry->err)
);
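
/*
 * Example of consuming this event from user space (illustrative; assumes
 * the bpftrace tool is available): sum XDP_TX drops per interface index:
 *
 *   bpftrace -e 'tracepoint:xdp:xdp_bulk_tx { @drops[args->ifindex] = sum(args->drops); }'
 */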

#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
struct _bpf_dtab_netdev {
        struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */
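
/*
 * Note: struct _bpf_dtab_netdev is a local mirror of the initial layout
 * of the devmap's private struct bpf_dtab_netdev (kernel/bpf/devmap.c).
 * It lets the redirect template below reach tgt->dev->ifindex without
 * including devmap internals; the two layouts must stay in sync.
 */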

DECLARE_EVENT_CLASS(xdp_redirect_template,

        TP_PROTO(const struct net_device *dev,
                 const struct bpf_prog *xdp,
                 const void *tgt, int err,
                 enum bpf_map_type map_type,
                 u32 map_id, u32 index),

        TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),

        TP_STRUCT__entry(
                __field(int, prog_id)
                __field(u32, act)
                __field(int, ifindex)
                __field(int, err)
                __field(int, to_ifindex)
                __field(u32, map_id)
                __field(int, map_index)
        ),

        TP_fast_assign(
                u32 ifindex = 0, map_index = index;

                if (map_type == BPF_MAP_TYPE_DEVMAP ||
                    map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                        /* Leave to_ifindex at 0 for a broadcast redirect,
                         * as tgt will be NULL.
                         */
                        if (tgt)
                                ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
                } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
                        ifindex = index;
                        map_index = 0;
                }

                __entry->prog_id = xdp->aux->id;
                __entry->act = XDP_REDIRECT;
                __entry->ifindex = dev->ifindex;
                __entry->err = err;
                __entry->to_ifindex = ifindex;
                __entry->map_id = map_id;
                __entry->map_index = map_index;
        ),

        TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
                  " map_id=%d map_index=%d",
                  __entry->prog_id,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->ifindex, __entry->to_ifindex,
                  __entry->err, __entry->map_id, __entry->map_index)
);
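
/*
 * Example (illustrative, standard perf usage): record failed redirects
 * system-wide for ten seconds, then inspect them:
 *
 *   # perf record -e xdp:xdp_redirect_err -a -- sleep 10
 *   # perf script
 */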

DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
        TP_PROTO(const struct net_device *dev,
                 const struct bpf_prog *xdp,
                 const void *tgt, int err,
                 enum bpf_map_type map_type,
                 u32 map_id, u32 index),
        TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
        TP_PROTO(const struct net_device *dev,
                 const struct bpf_prog *xdp,
                 const void *tgt, int err,
                 enum bpf_map_type map_type,
                 u32 map_id, u32 index),
        TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

#define _trace_xdp_redirect(dev, xdp, to) \
        trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err) \
        trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
        trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
        trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
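
/*
 * Note on the sentinel values above: a plain (non-map) redirect is
 * encoded as map_type == BPF_MAP_TYPE_UNSPEC with map_id == INT_MAX.
 * The event class's TP_fast_assign() recognizes that combination and
 * reinterprets "index" as the destination ifindex instead of a map
 * index.
 */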

/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
        TP_PROTO(const struct net_device *dev,
                 const struct bpf_prog *xdp,
                 const void *tgt, int err,
                 enum bpf_map_type map_type,
                 u32 map_id, u32 index),
        TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
        TP_PROTO(const struct net_device *dev,
                 const struct bpf_prog *xdp,
                 const void *tgt, int err,
                 enum bpf_map_type map_type,
                 u32 map_id, u32 index),
        TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

TRACE_EVENT(xdp_cpumap_kthread,

        TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
                 int sched, struct xdp_cpumap_stats *xdp_stats),

        TP_ARGS(map_id, processed, drops, sched, xdp_stats),

        TP_STRUCT__entry(
                __field(int, map_id)
                __field(u32, act)
                __field(int, cpu)
                __field(unsigned int, drops)
                __field(unsigned int, processed)
                __field(int, sched)
                __field(unsigned int, xdp_pass)
                __field(unsigned int, xdp_drop)
                __field(unsigned int, xdp_redirect)
        ),

        TP_fast_assign(
                __entry->map_id = map_id;
                __entry->act = XDP_REDIRECT;
                __entry->cpu = smp_processor_id();
                __entry->drops = drops;
                __entry->processed = processed;
                __entry->sched = sched;
                __entry->xdp_pass = xdp_stats->pass;
                __entry->xdp_drop = xdp_stats->drop;
                __entry->xdp_redirect = xdp_stats->redirect;
        ),

        TP_printk("kthread"
                  " cpu=%d map_id=%d action=%s"
                  " processed=%u drops=%u"
                  " sched=%d"
                  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
                  __entry->cpu, __entry->map_id,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->processed, __entry->drops,
                  __entry->sched,
                  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);
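
/*
 * Reading this event (informational note): it fires from the cpumap
 * kthread running on the destination CPU. "sched" reflects kthread
 * scheduling activity while draining the queue, and the xdp_pass/
 * xdp_drop/xdp_redirect counters report the verdicts of an XDP program
 * attached to the cpumap entry (all zero when none is attached).
 */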

TRACE_EVENT(xdp_cpumap_enqueue,

        TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
                 int to_cpu),

        TP_ARGS(map_id, processed, drops, to_cpu),

        TP_STRUCT__entry(
                __field(int, map_id)
                __field(u32, act)
                __field(int, cpu)
                __field(unsigned int, drops)
                __field(unsigned int, processed)
                __field(int, to_cpu)
        ),

        TP_fast_assign(
                __entry->map_id = map_id;
                __entry->act = XDP_REDIRECT;
                __entry->cpu = smp_processor_id();
                __entry->drops = drops;
                __entry->processed = processed;
                __entry->to_cpu = to_cpu;
        ),

        TP_printk("enqueue"
                  " cpu=%d map_id=%d action=%s"
                  " processed=%u drops=%u"
                  " to_cpu=%d",
                  __entry->cpu, __entry->map_id,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->processed, __entry->drops,
                  __entry->to_cpu)
);
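
/*
 * Informational note: this event is emitted on the CPU performing the
 * redirect when a batch of frames is enqueued toward "to_cpu". Here
 * "drops" means frames that could not be enqueued (for example when the
 * destination queue is full), not frames dropped by an XDP program.
 */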

TRACE_EVENT(xdp_devmap_xmit,

        TP_PROTO(const struct net_device *from_dev,
                 const struct net_device *to_dev,
                 int sent, int drops, int err),

        TP_ARGS(from_dev, to_dev, sent, drops, err),

        TP_STRUCT__entry(
                __field(int, from_ifindex)
                __field(u32, act)
                __field(int, to_ifindex)
                __field(int, drops)
                __field(int, sent)
                __field(int, err)
        ),

        TP_fast_assign(
                __entry->from_ifindex = from_dev->ifindex;
                __entry->act = XDP_REDIRECT;
                __entry->to_ifindex = to_dev->ifindex;
                __entry->drops = drops;
                __entry->sent = sent;
                __entry->err = err;
        ),

        TP_printk("ndo_xdp_xmit"
                  " from_ifindex=%d to_ifindex=%d action=%s"
                  " sent=%d drops=%d"
                  " err=%d",
                  __entry->from_ifindex, __entry->to_ifindex,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->sent, __entry->drops,
                  __entry->err)
);
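
/*
 * Informational note: this event brackets the egress driver's
 * ndo_xdp_xmit() call; "sent" is the number of frames the driver
 * accepted, "drops" the remainder, and "err" the xmit return code.
 */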

/* Users are expected to have included <net/xdp.h> already, but not xdp_priv.h */
#include <net/xdp_priv.h>

#define __MEM_TYPE_MAP(FN) \
        FN(PAGE_SHARED)    \
        FN(PAGE_ORDER0)    \
        FN(PAGE_POOL)      \
        FN(XSK_BUFF_POOL)

#define __MEM_TYPE_TP_FN(x) \
        TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x) \
        { MEM_TYPE_##x, #x },
#define __MEM_TYPE_SYM_TAB \
        __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
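
/*
 * Same X-macro pattern as __XDP_ACT_MAP above: each MEM_TYPE_* value of
 * enum xdp_mem_type (from <net/xdp.h>) gets a TRACE_DEFINE_ENUM() entry
 * plus a slot in the symbol table used by __print_symbolic() below.
 */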

TRACE_EVENT(mem_disconnect,

        TP_PROTO(const struct xdp_mem_allocator *xa),

        TP_ARGS(xa),

        TP_STRUCT__entry(
                __field(const struct xdp_mem_allocator *, xa)
                __field(u32, mem_id)
                __field(u32, mem_type)
                __field(const void *, allocator)
        ),

        TP_fast_assign(
                __entry->xa = xa;
                __entry->mem_id = xa->mem.id;
                __entry->mem_type = xa->mem.type;
                __entry->allocator = xa->allocator;
        ),

        TP_printk("mem_id=%d mem_type=%s allocator=%p",
                  __entry->mem_id,
                  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
                  __entry->allocator
        )
);

TRACE_EVENT(mem_connect,

        TP_PROTO(const struct xdp_mem_allocator *xa,
                 const struct xdp_rxq_info *rxq),

        TP_ARGS(xa, rxq),

        TP_STRUCT__entry(
                __field(const struct xdp_mem_allocator *, xa)
                __field(u32, mem_id)
                __field(u32, mem_type)
                __field(const void *, allocator)
                __field(const struct xdp_rxq_info *, rxq)
                __field(int, ifindex)
        ),

        TP_fast_assign(
                __entry->xa = xa;
                __entry->mem_id = xa->mem.id;
                __entry->mem_type = xa->mem.type;
                __entry->allocator = xa->allocator;
                __entry->rxq = rxq;
                __entry->ifindex = rxq->dev->ifindex;
        ),

        TP_printk("mem_id=%d mem_type=%s allocator=%p"
                  " ifindex=%d",
                  __entry->mem_id,
                  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
                  __entry->allocator,
                  __entry->ifindex
        )
);

TRACE_EVENT(mem_return_failed,

        TP_PROTO(const struct xdp_mem_info *mem,
                 const struct page *page),

        TP_ARGS(mem, page),

        TP_STRUCT__entry(
                __field(const struct page *, page)
                __field(u32, mem_id)
                __field(u32, mem_type)
        ),

        TP_fast_assign(
                __entry->page = page;
                __entry->mem_id = mem->id;
                __entry->mem_type = mem->type;
        ),

        TP_printk("mem_id=%d mem_type=%s page=%p",
                  __entry->mem_id,
                  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
                  __entry->page
        )
);

#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>
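
/*
 * The include above must stay outside the _TRACE_XDP_H guard:
 * <trace/define_trace.h> defines TRACE_HEADER_MULTI_READ and re-includes
 * this header to generate the actual tracepoint definitions for the
 * events declared above.
 */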