/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

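/*
 * Entry points used by BPF_LSM_CGROUP shim programs: each one resolves the
 * relevant cgroup (from a sock, a socket, or the current task) and runs that
 * cgroup's effective programs for the LSM hook.
 */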
unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
                                       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
                                         const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
                                          const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
        case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
        switch (attach_type) {
        CGROUP_ATYPE(CGROUP_INET_INGRESS);
        CGROUP_ATYPE(CGROUP_INET_EGRESS);
        CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
        CGROUP_ATYPE(CGROUP_SOCK_OPS);
        CGROUP_ATYPE(CGROUP_DEVICE);
        CGROUP_ATYPE(CGROUP_INET4_BIND);
        CGROUP_ATYPE(CGROUP_INET6_BIND);
        CGROUP_ATYPE(CGROUP_INET4_CONNECT);
        CGROUP_ATYPE(CGROUP_INET6_CONNECT);
        CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
        CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
        CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
        CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
        CGROUP_ATYPE(CGROUP_SYSCTL);
        CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
        CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
        CGROUP_ATYPE(CGROUP_GETSOCKOPT);
        CGROUP_ATYPE(CGROUP_SETSOCKOPT);
        CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
        default:
                return CGROUP_BPF_ATTACH_TYPE_INVALID;
        }
}

#undef CGROUP_ATYPE

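/*
 * One static key per attach type: the cgroup_bpf_enabled() checks in the
 * hook macros below stay patched out until at least one program of the
 * corresponding attach type is attached somewhere in the system.
 */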
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

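/* State behind a bpf_link that attaches a BPF program to a cgroup. */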
struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

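/*
 * One entry in a cgroup's list of attached programs.  Either @prog (legacy
 * prog attach) or @link (link-based attach) is set, never both, along with
 * the program's cgroup-local storage.
 */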
struct bpf_prog_list {
        struct hlist_node node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

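/*
 * Called by the cgroup core: cgroup_bpf_inherit() populates a newly created
 * cgroup's arrays of effective programs from its ancestors, and
 * cgroup_bpf_offline() releases them when the cgroup is taken down.
 */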
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum cgroup_bpf_attach_type atype,
                                      void *t_ctx,
                                      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
                                            int optname, void *optval,
                                            int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                                    enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
                                           enum cgroup_bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        struct bpf_prog_array *array;

        array = rcu_access_pointer(cgrp->bpf.effective[type]);
        return array != &bpf_empty_prog_array.hdr;
}

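/*
 * Most of the BPF_CGROUP_RUN_* wrappers below evaluate to 0 when no program
 * is attached or when every attached program allows the operation, and to a
 * negative errno (typically -EPERM) when it is rejected; the getsockopt
 * wrappers pass through the original retval instead.
 */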
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
            cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
                                                    CGROUP_INET_INGRESS); \
        \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk); \
                if (sk_fullsock(__sk) && \
                    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
                                                            CGROUP_INET_EGRESS); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) { \
                __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
                                                          NULL, NULL); \
        __ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
                                                          t_ctx, NULL); \
                release_sock(sk); \
        } \
        __ret; \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
({ \
        u32 __flags = 0; \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
                                                          NULL, &__flags); \
                release_sock(sk); \
                if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
                        *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
        } \
        __ret; \
})

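/*
 * Illustrative sketch (not a call site defined in this header): a bind()
 * implementation would typically run the hook before its own checks and
 * honour the returned flag roughly like this:
 *
 *	u32 flags = 0;
 *	int err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
 *						     CGROUP_INET4_BIND, &flags);
 *	if (err)
 *		return err;
 *	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE))
 *		... require CAP_NET_BIND_SERVICE for privileged ports ...
 */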
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
        ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
          cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
         (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked), sets
 * sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk so that the cgroup-bpf progs of the listener-sk
 * will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on the listener-sk will not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
                                                         sock_ops, \
                                                         CGROUP_SOCK_OPS); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
                if (__sk && sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
                                                                 sock_ops, \
                                                                 CGROUP_SOCK_OPS); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
                __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
                                                          access, \
                                                          CGROUP_DEVICE); \
        \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
                                                       buf, count, pos, \
                                                       CGROUP_SYSCTL); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
            cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
                                                           optname, optval, \
                                                           optlen, \
                                                           kernel_optval); \
        __ret; \
})

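/*
 * BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN() reads the user-supplied optlen when a
 * CGROUP_GETSOCKOPT program may run, so the caller knows how large a kernel
 * buffer the program might need; it evaluates to 0 otherwise.
 */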
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
                get_user(__ret, optlen); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
                                       max_optlen, retval) \
({ \
        int __ret = retval; \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
            cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                          tcp_bpf_bypass_getsockopt, \
                                          level, optname)) \
                        __ret = __cgroup_bpf_run_filter_getsockopt( \
                                sock, level, optname, optval, optlen, \
                                max_optlen, retval); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) \
({ \
        int __ret = retval; \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
                        sock, level, optname, optval, optlen, retval); \
        __ret; \
})

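/*
 * Typical hook-site pattern (an illustrative sketch, not a call site defined
 * in this header): callers invoke the wrappers unconditionally and treat a
 * non-zero result as a rejection, e.g.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err)
 *		goto out;
 *
 * With CONFIG_CGROUP_BPF=n the stub definitions at the end of this header
 * make such checks compile to nothing.
 */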
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);

#else

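/* CONFIG_CGROUP_BPF=n: stub out the API so that hook sites still compile. */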
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}

static inline const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value)
{
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value,
                                                   u64 flags)
{
        return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */