/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;
/* A registered region of user memory (UMEM) that backs AF_XDP socket
 * frame buffers.  Shared (refcounted) between sockets.
 */
struct xdp_umem {
	void *addrs;			/* kernel-side mapping of the area */
	u64 size;			/* total size of the area, in bytes */
	u32 headroom;			/* headroom reserved in each chunk */
	u32 chunk_size;			/* size of a single frame/chunk */
	u32 chunks;			/* number of chunks in the area */
	u32 npgs;			/* number of pinned pages backing it */
	struct user_struct *user;	/* owning user, for memory accounting */
	refcount_t users;		/* sockets/pools holding a reference */
	u8 flags;			/* XDP_UMEM_* registration flags */
	bool zc;			/* true when used in zero-copy mode */
	struct page **pgs;		/* the pinned pages themselves */
	int id;				/* umem identifier */
	/* NOTE(review): presumably DMA mappings for zero-copy devices —
	 * confirm against xsk_buff_pool users.
	 */
	struct list_head xsk_dma_list;
	struct work_struct work;	/* deferred teardown/release work */
};
/* BPF map holding XDP sockets (XSKMAP): an array of RCU-protected
 * xdp_sock pointers behind the generic bpf_map header.
 */
struct xsk_map {
	struct bpf_map map;		/* embedded generic BPF map header */
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock __rcu *xsk_map[];	/* flexible array of socket slots */
};
/* An AF_XDP socket.  The rx and tx queue pointers each start a new
 * cacheline (____cacheline_aligned_in_smp) to separate the two
 * fast-path directions.
 */
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;		/* device this socket is bound to */
	struct xdp_umem *umem;		/* registered user memory area */
	struct list_head flush_node;	/* linkage for batched flushing */
	struct xsk_buff_pool *pool;	/* buffer pool used after bind */
	u16 queue_id;			/* device queue id bound to */
	bool zc;			/* zero-copy mode enabled */
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;			/* bind state of the socket */
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;	/* NOTE(review): presumably per-pool tx linkage — confirm */
	/* Protects generic receive. */
	spinlock_t rx_lock;
	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;
	struct list_head map_list;	/* XSKMAP entries referencing this socket */
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

#else
  73. static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
  74. {
  75. return -ENOTSUPP;
  76. }
/* Stub for kernels built without CONFIG_XDP_SOCKETS: redirecting a
 * packet into an XSKMAP socket is unsupported.
 */
static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}
/* Stub for kernels built without CONFIG_XDP_SOCKETS: nothing to flush. */
static inline void __xsk_map_flush(void)
{
}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */