bpf.h 86 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  3. */
  4. #ifndef _LINUX_BPF_H
  5. #define _LINUX_BPF_H 1
  6. #include <uapi/linux/bpf.h>
  7. #include <uapi/linux/filter.h>
  8. #include <linux/workqueue.h>
  9. #include <linux/file.h>
  10. #include <linux/percpu.h>
  11. #include <linux/err.h>
  12. #include <linux/rbtree_latch.h>
  13. #include <linux/numa.h>
  14. #include <linux/mm_types.h>
  15. #include <linux/wait.h>
  16. #include <linux/refcount.h>
  17. #include <linux/mutex.h>
  18. #include <linux/module.h>
  19. #include <linux/kallsyms.h>
  20. #include <linux/capability.h>
  21. #include <linux/sched/mm.h>
  22. #include <linux/slab.h>
  23. #include <linux/percpu-refcount.h>
  24. #include <linux/stddef.h>
  25. #include <linux/bpfptr.h>
  26. #include <linux/btf.h>
  27. #include <linux/rcupdate_trace.h>
  28. #include <linux/static_call.h>
  29. #include <linux/android_kabi.h>
  30. struct bpf_verifier_env;
  31. struct bpf_verifier_log;
  32. struct perf_event;
  33. struct bpf_prog;
  34. struct bpf_prog_aux;
  35. struct bpf_map;
  36. struct sock;
  37. struct seq_file;
  38. struct btf;
  39. struct btf_type;
  40. struct exception_table_entry;
  41. struct seq_operations;
  42. struct bpf_iter_aux_info;
  43. struct bpf_local_storage;
  44. struct bpf_local_storage_map;
  45. struct kobject;
  46. struct mem_cgroup;
  47. struct module;
  48. struct bpf_func_state;
  49. struct ftrace_ops;
  50. struct cgroup;
  51. extern struct idr btf_idr;
  52. extern spinlock_t btf_idr_lock;
  53. extern struct kobject *btf_kobj;
  54. typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
  55. typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
  56. struct bpf_iter_aux_info *aux);
  57. typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
  58. typedef unsigned int (*bpf_func_t)(const void *,
  59. const struct bpf_insn *);
  60. struct bpf_iter_seq_info {
  61. const struct seq_operations *seq_ops;
  62. bpf_iter_init_seq_priv_t init_seq_private;
  63. bpf_iter_fini_seq_priv_t fini_seq_private;
  64. u32 seq_priv_size;
  65. };
  66. /* map is generic key/value storage optionally accessible by eBPF programs */
  67. struct bpf_map_ops {
  68. /* funcs callable from userspace (via syscall) */
  69. int (*map_alloc_check)(union bpf_attr *attr);
  70. struct bpf_map *(*map_alloc)(union bpf_attr *attr);
  71. void (*map_release)(struct bpf_map *map, struct file *map_file);
  72. void (*map_free)(struct bpf_map *map);
  73. int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
  74. void (*map_release_uref)(struct bpf_map *map);
  75. void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
  76. int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
  77. union bpf_attr __user *uattr);
  78. int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
  79. void *value, u64 flags);
  80. int (*map_lookup_and_delete_batch)(struct bpf_map *map,
  81. const union bpf_attr *attr,
  82. union bpf_attr __user *uattr);
  83. int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
  84. union bpf_attr __user *uattr);
  85. int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
  86. union bpf_attr __user *uattr);
  87. /* funcs callable from userspace and from eBPF programs */
  88. void *(*map_lookup_elem)(struct bpf_map *map, void *key);
  89. int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
  90. int (*map_delete_elem)(struct bpf_map *map, void *key);
  91. int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
  92. int (*map_pop_elem)(struct bpf_map *map, void *value);
  93. int (*map_peek_elem)(struct bpf_map *map, void *value);
  94. void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
  95. /* funcs called by prog_array and perf_event_array map */
  96. void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
  97. int fd);
  98. void (*map_fd_put_ptr)(void *ptr);
  99. int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
  100. u32 (*map_fd_sys_lookup_elem)(void *ptr);
  101. void (*map_seq_show_elem)(struct bpf_map *map, void *key,
  102. struct seq_file *m);
  103. int (*map_check_btf)(const struct bpf_map *map,
  104. const struct btf *btf,
  105. const struct btf_type *key_type,
  106. const struct btf_type *value_type);
  107. /* Prog poke tracking helpers. */
  108. int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
  109. void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
  110. void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
  111. struct bpf_prog *new);
  112. /* Direct value access helpers. */
  113. int (*map_direct_value_addr)(const struct bpf_map *map,
  114. u64 *imm, u32 off);
  115. int (*map_direct_value_meta)(const struct bpf_map *map,
  116. u64 imm, u32 *off);
  117. int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
  118. __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
  119. struct poll_table_struct *pts);
  120. /* Functions called by bpf_local_storage maps */
  121. int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
  122. void *owner, u32 size);
  123. void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
  124. void *owner, u32 size);
  125. struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
  126. /* Misc helpers.*/
  127. int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
  128. /* map_meta_equal must be implemented for maps that can be
  129. * used as an inner map. It is a runtime check to ensure
  130. * an inner map can be inserted to an outer map.
  131. *
  132. * Some properties of the inner map has been used during the
  133. * verification time. When inserting an inner map at the runtime,
  134. * map_meta_equal has to ensure the inserting map has the same
  135. * properties that the verifier has used earlier.
  136. */
  137. bool (*map_meta_equal)(const struct bpf_map *meta0,
  138. const struct bpf_map *meta1);
  139. int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
  140. struct bpf_func_state *caller,
  141. struct bpf_func_state *callee);
  142. int (*map_for_each_callback)(struct bpf_map *map,
  143. bpf_callback_t callback_fn,
  144. void *callback_ctx, u64 flags);
  145. /* BTF id of struct allocated by map_alloc */
  146. int *map_btf_id;
  147. /* bpf_iter info used to open a seq_file */
  148. const struct bpf_iter_seq_info *iter_seq_info;
  149. ANDROID_KABI_RESERVE(1);
  150. ANDROID_KABI_RESERVE(2);
  151. };
  152. enum {
  153. /* Support at most 8 pointers in a BPF map value */
  154. BPF_MAP_VALUE_OFF_MAX = 8,
  155. BPF_MAP_OFF_ARR_MAX = BPF_MAP_VALUE_OFF_MAX +
  156. 1 + /* for bpf_spin_lock */
  157. 1, /* for bpf_timer */
  158. };
  159. enum bpf_kptr_type {
  160. BPF_KPTR_UNREF,
  161. BPF_KPTR_REF,
  162. };
  163. struct bpf_map_value_off_desc {
  164. u32 offset;
  165. enum bpf_kptr_type type;
  166. struct {
  167. struct btf *btf;
  168. struct module *module;
  169. btf_dtor_kfunc_t dtor;
  170. u32 btf_id;
  171. } kptr;
  172. };
  173. struct bpf_map_value_off {
  174. u32 nr_off;
  175. struct bpf_map_value_off_desc off[];
  176. };
  177. struct bpf_map_off_arr {
  178. u32 cnt;
  179. u32 field_off[BPF_MAP_OFF_ARR_MAX];
  180. u8 field_sz[BPF_MAP_OFF_ARR_MAX];
  181. };
  182. struct bpf_map {
  183. /* The first two cachelines with read-mostly members of which some
  184. * are also accessed in fast-path (e.g. ops, max_entries).
  185. */
  186. const struct bpf_map_ops *ops ____cacheline_aligned;
  187. struct bpf_map *inner_map_meta;
  188. #ifdef CONFIG_SECURITY
  189. void *security;
  190. #endif
  191. enum bpf_map_type map_type;
  192. u32 key_size;
  193. u32 value_size;
  194. u32 max_entries;
  195. u64 map_extra; /* any per-map-type extra fields */
  196. u32 map_flags;
  197. int spin_lock_off; /* >=0 valid offset, <0 error */
  198. struct bpf_map_value_off *kptr_off_tab;
  199. int timer_off; /* >=0 valid offset, <0 error */
  200. u32 id;
  201. int numa_node;
  202. u32 btf_key_type_id;
  203. u32 btf_value_type_id;
  204. u32 btf_vmlinux_value_type_id;
  205. struct btf *btf;
  206. #ifdef CONFIG_MEMCG_KMEM
  207. struct obj_cgroup *objcg;
  208. #endif
  209. char name[BPF_OBJ_NAME_LEN];
  210. struct bpf_map_off_arr *off_arr;
  211. /* The 3rd and 4th cacheline with misc members to avoid false sharing
  212. * particularly with refcounting.
  213. */
  214. atomic64_t refcnt ____cacheline_aligned;
  215. atomic64_t usercnt;
  216. struct work_struct work;
  217. struct mutex freeze_mutex;
  218. atomic64_t writecnt;
  219. /* 'Ownership' of program-containing map is claimed by the first program
  220. * that is going to use this map or by the first program which FD is
  221. * stored in the map to make sure that all callers and callees have the
  222. * same prog type, JITed flag and xdp_has_frags flag.
  223. */
  224. struct {
  225. spinlock_t lock;
  226. enum bpf_prog_type type;
  227. bool jited;
  228. bool xdp_has_frags;
  229. } owner;
  230. bool bypass_spec_v1;
  231. bool frozen; /* write-once; write-protected by freeze_mutex */
  232. };
  233. static inline bool map_value_has_spin_lock(const struct bpf_map *map)
  234. {
  235. return map->spin_lock_off >= 0;
  236. }
  237. static inline bool map_value_has_timer(const struct bpf_map *map)
  238. {
  239. return map->timer_off >= 0;
  240. }
  241. static inline bool map_value_has_kptrs(const struct bpf_map *map)
  242. {
  243. return !IS_ERR_OR_NULL(map->kptr_off_tab);
  244. }
  245. /* 'dst' must be a temporary buffer and should not point to memory that is being
  246. * used in parallel by a bpf program or bpf syscall, otherwise the access from
  247. * the bpf program or bpf syscall may be corrupted by the reinitialization,
  248. * leading to weird problems. Even 'dst' is newly-allocated from bpf memory
  249. * allocator, it is still possible for 'dst' to be used in parallel by a bpf
  250. * program or bpf syscall.
  251. */
  252. static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
  253. {
  254. if (unlikely(map_value_has_spin_lock(map)))
  255. memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
  256. if (unlikely(map_value_has_timer(map)))
  257. memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
  258. if (unlikely(map_value_has_kptrs(map))) {
  259. struct bpf_map_value_off *tab = map->kptr_off_tab;
  260. int i;
  261. for (i = 0; i < tab->nr_off; i++)
  262. *(u64 *)(dst + tab->off[i].offset) = 0;
  263. }
  264. }
  265. /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
  266. * forced to use 'long' read/writes to try to atomically copy long counters.
  267. * Best-effort only. No barriers here, since it _will_ race with concurrent
  268. * updates from BPF programs. Called from bpf syscall and mostly used with
  269. * size 8 or 16 bytes, so ask compiler to inline it.
  270. */
  271. static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
  272. {
  273. const long *lsrc = src;
  274. long *ldst = dst;
  275. size /= sizeof(long);
  276. while (size--)
  277. data_race(*ldst++ = *lsrc++);
  278. }
  279. /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
  280. static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, bool long_memcpy)
  281. {
  282. u32 curr_off = 0;
  283. int i;
  284. if (likely(!map->off_arr)) {
  285. if (long_memcpy)
  286. bpf_long_memcpy(dst, src, round_up(map->value_size, 8));
  287. else
  288. memcpy(dst, src, map->value_size);
  289. return;
  290. }
  291. for (i = 0; i < map->off_arr->cnt; i++) {
  292. u32 next_off = map->off_arr->field_off[i];
  293. memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
  294. curr_off = next_off + map->off_arr->field_sz[i];
  295. }
  296. memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
  297. }
  298. static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
  299. {
  300. __copy_map_value(map, dst, src, false);
  301. }
  302. static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
  303. {
  304. __copy_map_value(map, dst, src, true);
  305. }
  306. static inline void zero_map_value(struct bpf_map *map, void *dst)
  307. {
  308. u32 curr_off = 0;
  309. int i;
  310. if (likely(!map->off_arr)) {
  311. memset(dst, 0, map->value_size);
  312. return;
  313. }
  314. for (i = 0; i < map->off_arr->cnt; i++) {
  315. u32 next_off = map->off_arr->field_off[i];
  316. memset(dst + curr_off, 0, next_off - curr_off);
  317. curr_off = next_off + map->off_arr->field_sz[i];
  318. }
  319. memset(dst + curr_off, 0, map->value_size - curr_off);
  320. }
  321. void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
  322. bool lock_src);
  323. void bpf_timer_cancel_and_free(void *timer);
  324. int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
  325. struct bpf_offload_dev;
  326. struct bpf_offloaded_map;
  327. struct bpf_map_dev_ops {
  328. int (*map_get_next_key)(struct bpf_offloaded_map *map,
  329. void *key, void *next_key);
  330. int (*map_lookup_elem)(struct bpf_offloaded_map *map,
  331. void *key, void *value);
  332. int (*map_update_elem)(struct bpf_offloaded_map *map,
  333. void *key, void *value, u64 flags);
  334. int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
  335. ANDROID_KABI_RESERVE(1);
  336. };
  337. struct bpf_offloaded_map {
  338. struct bpf_map map;
  339. struct net_device *netdev;
  340. const struct bpf_map_dev_ops *dev_ops;
  341. void *dev_priv;
  342. struct list_head offloads;
  343. };
  344. static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
  345. {
  346. return container_of(map, struct bpf_offloaded_map, map);
  347. }
  348. static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
  349. {
  350. return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
  351. }
  352. static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
  353. {
  354. return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
  355. map->ops->map_seq_show_elem;
  356. }
  357. int map_check_no_btf(const struct bpf_map *map,
  358. const struct btf *btf,
  359. const struct btf_type *key_type,
  360. const struct btf_type *value_type);
  361. bool bpf_map_meta_equal(const struct bpf_map *meta0,
  362. const struct bpf_map *meta1);
  363. extern const struct bpf_map_ops bpf_map_offload_ops;
  364. /* bpf_type_flag contains a set of flags that are applicable to the values of
  365. * arg_type, ret_type and reg_type. For example, a pointer value may be null,
  366. * or a memory is read-only. We classify types into two categories: base types
  367. * and extended types. Extended types are base types combined with a type flag.
  368. *
  369. * Currently there are no more than 32 base types in arg_type, ret_type and
  370. * reg_types.
  371. */
  372. #define BPF_BASE_TYPE_BITS 8
  373. enum bpf_type_flag {
  374. /* PTR may be NULL. */
  375. PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
  376. /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
  377. * compatible with both mutable and immutable memory.
  378. */
  379. MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
  380. /* MEM was "allocated" from a different helper, and cannot be mixed
  381. * with regular non-MEM_ALLOC'ed MEM types.
  382. */
  383. MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS),
  384. /* MEM is in user address space. */
  385. MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
  386. /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
  387. * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
  388. * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
  389. * or bpf_this_cpu_ptr(), which will return the pointer corresponding
  390. * to the specified cpu.
  391. */
  392. MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
  393. /* Indicates that the argument will be released. */
  394. OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
  395. /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
  396. * unreferenced and referenced kptr loaded from map value using a load
  397. * instruction, so that they can only be dereferenced but not escape the
  398. * BPF program into the kernel (i.e. cannot be passed as arguments to
  399. * kfunc or bpf helpers).
  400. */
  401. PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
  402. MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
  403. /* DYNPTR points to memory local to the bpf program. */
  404. DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
  405. /* DYNPTR points to a kernel-produced ringbuf record. */
  406. DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
  407. /* Size is known at compile time. */
  408. MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
  409. __BPF_TYPE_FLAG_MAX,
  410. __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
  411. };
  412. #define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)
  413. /* Max number of base types. */
  414. #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
  415. /* Max number of all types. */
  416. #define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
  417. /* function argument constraints */
  418. enum bpf_arg_type {
  419. ARG_DONTCARE = 0, /* unused argument in helper function */
  420. /* the following constraints used to prototype
  421. * bpf_map_lookup/update/delete_elem() functions
  422. */
  423. ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
  424. ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
  425. ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
  426. /* Used to prototype bpf_memcmp() and other functions that access data
  427. * on eBPF program stack
  428. */
  429. ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
  430. ARG_CONST_SIZE, /* number of bytes accessed from memory */
  431. ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
  432. ARG_PTR_TO_CTX, /* pointer to context */
  433. ARG_ANYTHING, /* any (initialized) argument is ok */
  434. ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
  435. ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
  436. ARG_PTR_TO_INT, /* pointer to int */
  437. ARG_PTR_TO_LONG, /* pointer to long */
  438. ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
  439. ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
  440. ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
  441. ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
  442. ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
  443. ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
  444. ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
  445. ARG_PTR_TO_STACK, /* pointer to stack */
  446. ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
  447. ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
  448. ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
  449. ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
  450. __BPF_ARG_TYPE_MAX,
  451. /* Extended arg_types. */
  452. ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
  453. ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
  454. ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
  455. ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
  456. ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
  457. ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
  458. ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
  459. /* pointer to memory does not need to be initialized, helper function must fill
  460. * all bytes or clear them in error case.
  461. */
  462. ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
  463. /* Pointer to valid memory of size known at compile time. */
  464. ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
  465. /* This must be the last entry. Its purpose is to ensure the enum is
  466. * wide enough to hold the higher bits reserved for bpf_type_flag.
  467. */
  468. __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
  469. };
  470. static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
  471. /* type of values returned from helper functions */
  472. enum bpf_return_type {
  473. RET_INTEGER, /* function returns integer */
  474. RET_VOID, /* function doesn't return anything */
  475. RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
  476. RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
  477. RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
  478. RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
  479. RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */
  480. RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
  481. RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
  482. __BPF_RET_TYPE_MAX,
  483. /* Extended ret_types. */
  484. RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
  485. RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
  486. RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
  487. RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
  488. RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
  489. RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
  490. RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
  491. /* This must be the last entry. Its purpose is to ensure the enum is
  492. * wide enough to hold the higher bits reserved for bpf_type_flag.
  493. */
  494. __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
  495. };
  496. static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
  497. /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
  498. * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
  499. * instructions after verifying
  500. */
  501. struct bpf_func_proto {
  502. u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  503. bool gpl_only;
  504. bool pkt_access;
  505. enum bpf_return_type ret_type;
  506. union {
  507. struct {
  508. enum bpf_arg_type arg1_type;
  509. enum bpf_arg_type arg2_type;
  510. enum bpf_arg_type arg3_type;
  511. enum bpf_arg_type arg4_type;
  512. enum bpf_arg_type arg5_type;
  513. };
  514. enum bpf_arg_type arg_type[5];
  515. };
  516. union {
  517. struct {
  518. u32 *arg1_btf_id;
  519. u32 *arg2_btf_id;
  520. u32 *arg3_btf_id;
  521. u32 *arg4_btf_id;
  522. u32 *arg5_btf_id;
  523. };
  524. u32 *arg_btf_id[5];
  525. struct {
  526. size_t arg1_size;
  527. size_t arg2_size;
  528. size_t arg3_size;
  529. size_t arg4_size;
  530. size_t arg5_size;
  531. };
  532. size_t arg_size[5];
  533. };
  534. int *ret_btf_id; /* return value btf_id */
  535. bool (*allowed)(const struct bpf_prog *prog);
  536. };
  537. /* bpf_context is intentionally undefined structure. Pointer to bpf_context is
  538. * the first argument to eBPF programs.
  539. * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
  540. */
  541. struct bpf_context;
  542. enum bpf_access_type {
  543. BPF_READ = 1,
  544. BPF_WRITE = 2
  545. };
  546. /* types of values stored in eBPF registers */
  547. /* Pointer types represent:
  548. * pointer
  549. * pointer + imm
  550. * pointer + (u16) var
  551. * pointer + (u16) var + imm
  552. * if (range > 0) then [ptr, ptr + range - off) is safe to access
  553. * if (id > 0) means that some 'var' was added
  554. * if (off > 0) means that 'imm' was added
  555. */
  556. enum bpf_reg_type {
  557. NOT_INIT = 0, /* nothing was written into register */
  558. SCALAR_VALUE, /* reg doesn't contain a valid pointer */
  559. PTR_TO_CTX, /* reg points to bpf_context */
  560. CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
  561. PTR_TO_MAP_VALUE, /* reg points to map element value */
  562. PTR_TO_MAP_KEY, /* reg points to a map element key */
  563. PTR_TO_STACK, /* reg == frame_pointer + offset */
  564. PTR_TO_PACKET_META, /* skb->data - meta_len */
  565. PTR_TO_PACKET, /* reg points to skb->data */
  566. PTR_TO_PACKET_END, /* skb->data + headlen */
  567. PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
  568. PTR_TO_SOCKET, /* reg points to struct bpf_sock */
  569. PTR_TO_SOCK_COMMON, /* reg points to sock_common */
  570. PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
  571. PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
  572. PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
  573. /* PTR_TO_BTF_ID points to a kernel struct that does not need
  574. * to be null checked by the BPF program. This does not imply the
  575. * pointer is _not_ null and in practice this can easily be a null
  576. * pointer when reading pointer chains. The assumption is program
  577. * context will handle null pointer dereference typically via fault
  578. * handling. The verifier must keep this in mind and can make no
  579. * assumptions about null or non-null when doing branch analysis.
  580. * Further, when passed into helpers the helpers can not, without
  581. * additional context, assume the value is non-null.
  582. */
  583. PTR_TO_BTF_ID,
  584. /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
  585. * been checked for null. Used primarily to inform the verifier
  586. * an explicit null check is required for this struct.
  587. */
  588. PTR_TO_MEM, /* reg points to valid memory region */
  589. PTR_TO_BUF, /* reg points to a read/write buffer */
  590. PTR_TO_FUNC, /* reg points to a bpf program function */
  591. PTR_TO_DYNPTR, /* reg points to a dynptr */
  592. __BPF_REG_TYPE_MAX,
  593. /* Extended reg_types. */
  594. PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
  595. PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
  596. PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
  597. PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
  598. PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
  599. /* This must be the last entry. Its purpose is to ensure the enum is
  600. * wide enough to hold the higher bits reserved for bpf_type_flag.
  601. */
  602. __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
  603. };
  604. static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
  605. /* The information passed from prog-specific *_is_valid_access
  606. * back to the verifier.
  607. */
  608. struct bpf_insn_access_aux {
  609. enum bpf_reg_type reg_type;
  610. union {
  611. int ctx_field_size;
  612. struct {
  613. struct btf *btf;
  614. u32 btf_id;
  615. };
  616. };
  617. struct bpf_verifier_log *log; /* for verbose logs */
  618. };
  619. static inline void
  620. bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
  621. {
  622. aux->ctx_field_size = size;
  623. }
  624. static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
  625. {
  626. return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
  627. insn->src_reg == BPF_PSEUDO_FUNC;
  628. }
  629. struct bpf_prog_ops {
  630. int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
  631. union bpf_attr __user *uattr);
  632. };
  633. struct bpf_verifier_ops {
  634. /* return eBPF function prototype for verification */
  635. const struct bpf_func_proto *
  636. (*get_func_proto)(enum bpf_func_id func_id,
  637. const struct bpf_prog *prog);
  638. /* return true if 'size' wide access at offset 'off' within bpf_context
  639. * with 'type' (read or write) is allowed
  640. */
  641. bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
  642. const struct bpf_prog *prog,
  643. struct bpf_insn_access_aux *info);
  644. int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
  645. const struct bpf_prog *prog);
  646. int (*gen_ld_abs)(const struct bpf_insn *orig,
  647. struct bpf_insn *insn_buf);
  648. u32 (*convert_ctx_access)(enum bpf_access_type type,
  649. const struct bpf_insn *src,
  650. struct bpf_insn *dst,
  651. struct bpf_prog *prog, u32 *target_size);
  652. int (*btf_struct_access)(struct bpf_verifier_log *log,
  653. const struct btf *btf,
  654. const struct btf_type *t, int off, int size,
  655. enum bpf_access_type atype,
  656. u32 *next_btf_id, enum bpf_type_flag *flag);
  657. ANDROID_KABI_RESERVE(1);
  658. };
  659. struct bpf_prog_offload_ops {
  660. /* verifier basic callbacks */
  661. int (*insn_hook)(struct bpf_verifier_env *env,
  662. int insn_idx, int prev_insn_idx);
  663. int (*finalize)(struct bpf_verifier_env *env);
  664. /* verifier optimization callbacks (called after .finalize) */
  665. int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
  666. struct bpf_insn *insn);
  667. int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
  668. /* program management callbacks */
  669. int (*prepare)(struct bpf_prog *prog);
  670. int (*translate)(struct bpf_prog *prog);
  671. void (*destroy)(struct bpf_prog *prog);
  672. ANDROID_KABI_RESERVE(1);
  673. };
  674. struct bpf_prog_offload {
  675. struct bpf_prog *prog;
  676. struct net_device *netdev;
  677. struct bpf_offload_dev *offdev;
  678. void *dev_priv;
  679. struct list_head offloads;
  680. bool dev_state;
  681. bool opt_failed;
  682. void *jited_image;
  683. u32 jited_len;
  684. ANDROID_KABI_RESERVE(1);
  685. };
  686. enum bpf_cgroup_storage_type {
  687. BPF_CGROUP_STORAGE_SHARED,
  688. BPF_CGROUP_STORAGE_PERCPU,
  689. __BPF_CGROUP_STORAGE_MAX
  690. };
  691. #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
  692. /* The longest tracepoint has 12 args.
  693. * See include/trace/bpf_probe.h
  694. */
  695. #define MAX_BPF_FUNC_ARGS 12
  696. /* The maximum number of arguments passed through registers
  697. * a single function may have.
  698. */
  699. #define MAX_BPF_FUNC_REG_ARGS 5
  700. /* The argument is a structure. */
  701. #define BTF_FMODEL_STRUCT_ARG BIT(0)
  702. struct btf_func_model {
  703. u8 ret_size;
  704. u8 nr_args;
  705. u8 arg_size[MAX_BPF_FUNC_ARGS];
  706. u8 arg_flags[MAX_BPF_FUNC_ARGS];
  707. };
  708. /* Restore arguments before returning from trampoline to let original function
  709. * continue executing. This flag is used for fentry progs when there are no
  710. * fexit progs.
  711. */
  712. #define BPF_TRAMP_F_RESTORE_REGS BIT(0)
  713. /* Call original function after fentry progs, but before fexit progs.
  714. * Makes sense for fentry/fexit, normal calls and indirect calls.
  715. */
  716. #define BPF_TRAMP_F_CALL_ORIG BIT(1)
  717. /* Skip current frame and return to parent. Makes sense for fentry/fexit
  718. * programs only. Should not be used with normal calls and indirect calls.
  719. */
  720. #define BPF_TRAMP_F_SKIP_FRAME BIT(2)
  721. /* Store IP address of the caller on the trampoline stack,
  722. * so it's available for trampoline's programs.
  723. */
  724. #define BPF_TRAMP_F_IP_ARG BIT(3)
  725. /* Return the return value of fentry prog. Only used by bpf_struct_ops. */
  726. #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
  727. /* Get original function from stack instead of from provided direct address.
  728. * Makes sense for trampolines with fexit or fmod_ret programs.
  729. */
  730. #define BPF_TRAMP_F_ORIG_STACK BIT(5)
  731. /* This trampoline is on a function with another ftrace_ops with IPMODIFY,
  732. * e.g., a live patch. This flag is set and cleared by ftrace call backs,
  733. */
  734. #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
  735. /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  736. * bytes on x86.
  737. */
  738. #define BPF_MAX_TRAMP_LINKS 38
  739. struct bpf_tramp_links {
  740. struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
  741. int nr_links;
  742. };
  743. struct bpf_tramp_run_ctx;
  744. /* Different use cases for BPF trampoline:
  745. * 1. replace nop at the function entry (kprobe equivalent)
  746. * flags = BPF_TRAMP_F_RESTORE_REGS
  747. * fentry = a set of programs to run before returning from trampoline
  748. *
  749. * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
  750. * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
  751. * orig_call = fentry_ip + MCOUNT_INSN_SIZE
  752. * fentry = a set of program to run before calling original function
  753. * fexit = a set of program to run after original function
  754. *
  755. * 3. replace direct call instruction anywhere in the function body
  756. * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
  757. * With flags = 0
  758. * fentry = a set of programs to run before returning from trampoline
  759. * With flags = BPF_TRAMP_F_CALL_ORIG
  760. * orig_call = original callback addr or direct function addr
  761. * fentry = a set of program to run before calling original function
  762. * fexit = a set of program to run after original function
  763. */
  764. struct bpf_tramp_image;
  765. int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
  766. const struct btf_func_model *m, u32 flags,
  767. struct bpf_tramp_links *tlinks,
  768. void *orig_call);
  769. u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
  770. struct bpf_tramp_run_ctx *run_ctx);
  771. void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
  772. struct bpf_tramp_run_ctx *run_ctx);
  773. void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
  774. void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
  775. typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
  776. struct bpf_tramp_run_ctx *run_ctx);
  777. typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
  778. struct bpf_tramp_run_ctx *run_ctx);
  779. bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
  780. bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
  781. struct bpf_ksym {
  782. unsigned long start;
  783. unsigned long end;
  784. char name[KSYM_NAME_LEN];
  785. struct list_head lnode;
  786. struct latch_tree_node tnode;
  787. bool prog;
  788. };
  789. enum bpf_tramp_prog_type {
  790. BPF_TRAMP_FENTRY,
  791. BPF_TRAMP_FEXIT,
  792. BPF_TRAMP_MODIFY_RETURN,
  793. BPF_TRAMP_MAX,
  794. BPF_TRAMP_REPLACE, /* more than MAX */
  795. };
  796. struct bpf_tramp_image {
  797. void *image;
  798. struct bpf_ksym ksym;
  799. struct percpu_ref pcref;
  800. void *ip_after_call;
  801. void *ip_epilogue;
  802. union {
  803. struct rcu_head rcu;
  804. struct work_struct work;
  805. };
  806. };
  807. struct bpf_trampoline {
  808. /* hlist for trampoline_table */
  809. struct hlist_node hlist;
  810. struct ftrace_ops *fops;
  811. /* serializes access to fields of this trampoline */
  812. struct mutex mutex;
  813. refcount_t refcnt;
  814. u32 flags;
  815. u64 key;
  816. struct {
  817. struct btf_func_model model;
  818. void *addr;
  819. bool ftrace_managed;
  820. } func;
  821. /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
  822. * program by replacing one of its functions. func.addr is the address
  823. * of the function it replaced.
  824. */
  825. struct bpf_prog *extension_prog;
  826. /* list of BPF programs using this trampoline */
  827. struct hlist_head progs_hlist[BPF_TRAMP_MAX];
  828. /* Number of attached programs. A counter per kind. */
  829. int progs_cnt[BPF_TRAMP_MAX];
  830. /* Executable image of trampoline */
  831. struct bpf_tramp_image *cur_image;
  832. u64 selector;
  833. struct module *mod;
  834. ANDROID_KABI_RESERVE(1);
  835. };
  836. struct bpf_attach_target_info {
  837. struct btf_func_model fmodel;
  838. long tgt_addr;
  839. const char *tgt_name;
  840. const struct btf_type *tgt_type;
  841. };
  842. #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
  843. struct bpf_dispatcher_prog {
  844. struct bpf_prog *prog;
  845. refcount_t users;
  846. };
  847. struct bpf_dispatcher {
  848. /* dispatcher mutex */
  849. struct mutex mutex;
  850. void *func;
  851. struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
  852. int num_progs;
  853. void *image;
  854. void *rw_image;
  855. u32 image_off;
  856. struct bpf_ksym ksym;
  857. #ifdef CONFIG_HAVE_STATIC_CALL
  858. struct static_call_key *sc_key;
  859. void *sc_tramp;
  860. #endif
  861. ANDROID_KABI_RESERVE(1);
  862. };
  863. static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
  864. const void *ctx,
  865. const struct bpf_insn *insnsi,
  866. bpf_func_t bpf_func)
  867. {
  868. return bpf_func(ctx, insnsi);
  869. }
  870. #ifdef CONFIG_BPF_JIT
  871. int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
  872. int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
  873. struct bpf_trampoline *bpf_trampoline_get(u64 key,
  874. struct bpf_attach_target_info *tgt_info);
  875. void bpf_trampoline_put(struct bpf_trampoline *tr);
  876. int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
  877. /*
  878. * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
  879. * indirection with a direct call to the bpf program. If the architecture does
  880. * not have STATIC_CALL, avoid a double-indirection.
  881. */
  882. #ifdef CONFIG_HAVE_STATIC_CALL
  883. #define __BPF_DISPATCHER_SC_INIT(_name) \
  884. .sc_key = &STATIC_CALL_KEY(_name), \
  885. .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
  886. #define __BPF_DISPATCHER_SC(name) \
  887. DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
  888. #define __BPF_DISPATCHER_CALL(name) \
  889. static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
  890. #define __BPF_DISPATCHER_UPDATE(_d, _new) \
  891. __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
  892. #else
  893. #define __BPF_DISPATCHER_SC_INIT(name)
  894. #define __BPF_DISPATCHER_SC(name)
  895. #define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
  896. #define __BPF_DISPATCHER_UPDATE(_d, _new)
  897. #endif
  898. #define BPF_DISPATCHER_INIT(_name) { \
  899. .mutex = __MUTEX_INITIALIZER(_name.mutex), \
  900. .func = &_name##_func, \
  901. .progs = {}, \
  902. .num_progs = 0, \
  903. .image = NULL, \
  904. .image_off = 0, \
  905. .ksym = { \
  906. .name = #_name, \
  907. .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
  908. }, \
  909. __BPF_DISPATCHER_SC_INIT(_name##_call) \
  910. }
  911. #define DEFINE_BPF_DISPATCHER(name) \
  912. __BPF_DISPATCHER_SC(name); \
  913. noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
  914. const void *ctx, \
  915. const struct bpf_insn *insnsi, \
  916. bpf_func_t bpf_func) \
  917. { \
  918. return __BPF_DISPATCHER_CALL(name); \
  919. } \
  920. EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
  921. struct bpf_dispatcher bpf_dispatcher_##name = \
  922. BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
  923. #define DECLARE_BPF_DISPATCHER(name) \
  924. unsigned int bpf_dispatcher_##name##_func( \
  925. const void *ctx, \
  926. const struct bpf_insn *insnsi, \
  927. bpf_func_t bpf_func); \
  928. extern struct bpf_dispatcher bpf_dispatcher_##name;
  929. #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
  930. #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
  931. void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
  932. struct bpf_prog *to);
  933. /* Called only from JIT-enabled code, so there's no need for stubs. */
  934. void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
  935. void bpf_image_ksym_del(struct bpf_ksym *ksym);
  936. void bpf_ksym_add(struct bpf_ksym *ksym);
  937. void bpf_ksym_del(struct bpf_ksym *ksym);
  938. int bpf_jit_charge_modmem(u32 size);
  939. void bpf_jit_uncharge_modmem(u32 size);
  940. bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
  941. #else
  942. static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
  943. struct bpf_trampoline *tr)
  944. {
  945. return -ENOTSUPP;
  946. }
  947. static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
  948. struct bpf_trampoline *tr)
  949. {
  950. return -ENOTSUPP;
  951. }
  952. static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
  953. struct bpf_attach_target_info *tgt_info)
  954. {
  955. return NULL;
  956. }
  957. static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
  958. #define DEFINE_BPF_DISPATCHER(name)
  959. #define DECLARE_BPF_DISPATCHER(name)
  960. #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
  961. #define BPF_DISPATCHER_PTR(name) NULL
  962. static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
  963. struct bpf_prog *from,
  964. struct bpf_prog *to) {}
  965. static inline bool is_bpf_image_address(unsigned long address)
  966. {
  967. return false;
  968. }
  969. static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
  970. {
  971. return false;
  972. }
  973. #endif
  974. struct bpf_func_info_aux {
  975. u16 linkage;
  976. bool unreliable;
  977. };
  978. enum bpf_jit_poke_reason {
  979. BPF_POKE_REASON_TAIL_CALL,
  980. };
  981. /* Descriptor of pokes pointing /into/ the JITed image. */
  982. struct bpf_jit_poke_descriptor {
  983. void *tailcall_target;
  984. void *tailcall_bypass;
  985. void *bypass_addr;
  986. void *aux;
  987. union {
  988. struct {
  989. struct bpf_map *map;
  990. u32 key;
  991. } tail_call;
  992. };
  993. bool tailcall_target_stable;
  994. u8 adj_off;
  995. u16 reason;
  996. u32 insn_idx;
  997. };
  998. /* reg_type info for ctx arguments */
  999. struct bpf_ctx_arg_aux {
  1000. u32 offset;
  1001. enum bpf_reg_type reg_type;
  1002. u32 btf_id;
  1003. };
  1004. struct btf_mod_pair {
  1005. struct btf *btf;
  1006. struct module *module;
  1007. };
  1008. struct bpf_kfunc_desc_tab;
  1009. struct bpf_prog_aux {
  1010. atomic64_t refcnt;
  1011. u32 used_map_cnt;
  1012. u32 used_btf_cnt;
  1013. u32 max_ctx_offset;
  1014. u32 max_pkt_offset;
  1015. u32 max_tp_access;
  1016. u32 stack_depth;
  1017. u32 id;
  1018. u32 func_cnt; /* used by non-func prog as the number of func progs */
  1019. u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
  1020. u32 attach_btf_id; /* in-kernel BTF type id to attach to */
  1021. u32 ctx_arg_info_size;
  1022. u32 max_rdonly_access;
  1023. u32 max_rdwr_access;
  1024. struct btf *attach_btf;
  1025. const struct bpf_ctx_arg_aux *ctx_arg_info;
  1026. struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
  1027. struct bpf_prog *dst_prog;
  1028. struct bpf_trampoline *dst_trampoline;
  1029. enum bpf_prog_type saved_dst_prog_type;
  1030. enum bpf_attach_type saved_dst_attach_type;
  1031. bool verifier_zext; /* Zero extensions has been inserted by verifier. */
  1032. bool offload_requested;
  1033. bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
  1034. bool func_proto_unreliable;
  1035. bool sleepable;
  1036. bool tail_call_reachable;
  1037. bool xdp_has_frags;
  1038. /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
  1039. const struct btf_type *attach_func_proto;
  1040. /* function name for valid attach_btf_id */
  1041. const char *attach_func_name;
  1042. struct bpf_prog **func;
  1043. void *jit_data; /* JIT specific data. arch dependent */
  1044. struct bpf_jit_poke_descriptor *poke_tab;
  1045. struct bpf_kfunc_desc_tab *kfunc_tab;
  1046. struct bpf_kfunc_btf_tab *kfunc_btf_tab;
  1047. u32 size_poke_tab;
  1048. struct bpf_ksym ksym;
  1049. const struct bpf_prog_ops *ops;
  1050. struct bpf_map **used_maps;
  1051. struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
  1052. struct btf_mod_pair *used_btfs;
  1053. struct bpf_prog *prog;
  1054. struct user_struct *user;
  1055. u64 load_time; /* ns since boottime */
  1056. u32 verified_insns;
  1057. int cgroup_atype; /* enum cgroup_bpf_attach_type */
  1058. struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
  1059. char name[BPF_OBJ_NAME_LEN];
  1060. #ifdef CONFIG_SECURITY
  1061. void *security;
  1062. #endif
  1063. struct bpf_prog_offload *offload;
  1064. struct btf *btf;
  1065. struct bpf_func_info *func_info;
  1066. struct bpf_func_info_aux *func_info_aux;
  1067. /* bpf_line_info loaded from userspace. linfo->insn_off
  1068. * has the xlated insn offset.
  1069. * Both the main and sub prog share the same linfo.
  1070. * The subprog can access its first linfo by
  1071. * using the linfo_idx.
  1072. */
  1073. struct bpf_line_info *linfo;
  1074. /* jited_linfo is the jited addr of the linfo. It has a
  1075. * one to one mapping to linfo:
  1076. * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
  1077. * Both the main and sub prog share the same jited_linfo.
  1078. * The subprog can access its first jited_linfo by
  1079. * using the linfo_idx.
  1080. */
  1081. void **jited_linfo;
  1082. u32 func_info_cnt;
  1083. u32 nr_linfo;
  1084. /* subprog can use linfo_idx to access its first linfo and
  1085. * jited_linfo.
  1086. * main prog always has linfo_idx == 0
  1087. */
  1088. u32 linfo_idx;
  1089. u32 num_exentries;
  1090. struct exception_table_entry *extable;
  1091. union {
  1092. struct work_struct work;
  1093. struct rcu_head rcu;
  1094. };
  1095. ANDROID_KABI_RESERVE(1);
  1096. };
  1097. struct bpf_prog {
  1098. u16 pages; /* Number of allocated pages */
  1099. u16 jited:1, /* Is our filter JIT'ed? */
  1100. jit_requested:1,/* archs need to JIT the prog */
  1101. gpl_compatible:1, /* Is filter GPL compatible? */
  1102. cb_access:1, /* Is control block accessed? */
  1103. dst_needed:1, /* Do we need dst entry? */
  1104. blinding_requested:1, /* needs constant blinding */
  1105. blinded:1, /* Was blinded */
  1106. is_func:1, /* program is a bpf function */
  1107. kprobe_override:1, /* Do we override a kprobe? */
  1108. has_callchain_buf:1, /* callchain buffer allocated? */
  1109. enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
  1110. call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
  1111. call_get_func_ip:1, /* Do we call get_func_ip() */
  1112. tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
  1113. enum bpf_prog_type type; /* Type of BPF program */
  1114. enum bpf_attach_type expected_attach_type; /* For some prog types */
  1115. u32 len; /* Number of filter blocks */
  1116. u32 jited_len; /* Size of jited insns in bytes */
  1117. u8 tag[BPF_TAG_SIZE];
  1118. struct bpf_prog_stats __percpu *stats;
  1119. int __percpu *active;
  1120. unsigned int (*bpf_func)(const void *ctx,
  1121. const struct bpf_insn *insn);
  1122. struct bpf_prog_aux *aux; /* Auxiliary fields */
  1123. struct sock_fprog_kern *orig_prog; /* Original BPF program */
  1124. ANDROID_KABI_RESERVE(1);
  1125. /* Instructions for interpreter */
  1126. union {
  1127. DECLARE_FLEX_ARRAY(struct sock_filter, insns);
  1128. DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
  1129. };
  1130. };
  1131. struct bpf_array_aux {
  1132. /* Programs with direct jumps into programs part of this array. */
  1133. struct list_head poke_progs;
  1134. struct bpf_map *map;
  1135. struct mutex poke_mutex;
  1136. struct work_struct work;
  1137. };
  1138. struct bpf_link {
  1139. atomic64_t refcnt;
  1140. u32 id;
  1141. enum bpf_link_type type;
  1142. const struct bpf_link_ops *ops;
  1143. struct bpf_prog *prog;
  1144. struct work_struct work;
  1145. };
  1146. struct bpf_link_ops {
  1147. void (*release)(struct bpf_link *link);
  1148. void (*dealloc)(struct bpf_link *link);
  1149. int (*detach)(struct bpf_link *link);
  1150. int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
  1151. struct bpf_prog *old_prog);
  1152. void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
  1153. int (*fill_link_info)(const struct bpf_link *link,
  1154. struct bpf_link_info *info);
  1155. ANDROID_KABI_RESERVE(1);
  1156. };
  1157. struct bpf_tramp_link {
  1158. struct bpf_link link;
  1159. struct hlist_node tramp_hlist;
  1160. u64 cookie;
  1161. };
  1162. struct bpf_shim_tramp_link {
  1163. struct bpf_tramp_link link;
  1164. struct bpf_trampoline *trampoline;
  1165. };
  1166. struct bpf_tracing_link {
  1167. struct bpf_tramp_link link;
  1168. enum bpf_attach_type attach_type;
  1169. struct bpf_trampoline *trampoline;
  1170. struct bpf_prog *tgt_prog;
  1171. };
  1172. struct bpf_link_primer {
  1173. struct bpf_link *link;
  1174. struct file *file;
  1175. int fd;
  1176. u32 id;
  1177. };
  1178. struct bpf_struct_ops_value;
  1179. struct btf_member;
  1180. #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
  1181. struct bpf_struct_ops {
  1182. const struct bpf_verifier_ops *verifier_ops;
  1183. int (*init)(struct btf *btf);
  1184. int (*check_member)(const struct btf_type *t,
  1185. const struct btf_member *member);
  1186. int (*init_member)(const struct btf_type *t,
  1187. const struct btf_member *member,
  1188. void *kdata, const void *udata);
  1189. int (*reg)(void *kdata);
  1190. void (*unreg)(void *kdata);
  1191. const struct btf_type *type;
  1192. const struct btf_type *value_type;
  1193. const char *name;
  1194. struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
  1195. u32 type_id;
  1196. u32 value_id;
  1197. ANDROID_KABI_RESERVE(1);
  1198. };
  1199. #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
  1200. #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
  1201. const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
  1202. void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
  1203. bool bpf_struct_ops_get(const void *kdata);
  1204. void bpf_struct_ops_put(const void *kdata);
  1205. int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
  1206. void *value);
  1207. int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
  1208. struct bpf_tramp_link *link,
  1209. const struct btf_func_model *model,
  1210. void *image, void *image_end);
  1211. static inline bool bpf_try_module_get(const void *data, struct module *owner)
  1212. {
  1213. if (owner == BPF_MODULE_OWNER)
  1214. return bpf_struct_ops_get(data);
  1215. else
  1216. return try_module_get(owner);
  1217. }
  1218. static inline void bpf_module_put(const void *data, struct module *owner)
  1219. {
  1220. if (owner == BPF_MODULE_OWNER)
  1221. bpf_struct_ops_put(data);
  1222. else
  1223. module_put(owner);
  1224. }
  1225. #ifdef CONFIG_NET
  1226. /* Define it here to avoid the use of forward declaration */
  1227. struct bpf_dummy_ops_state {
  1228. int val;
  1229. };
  1230. struct bpf_dummy_ops {
  1231. int (*test_1)(struct bpf_dummy_ops_state *cb);
  1232. int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
  1233. char a3, unsigned long a4);
  1234. };
  1235. int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
  1236. union bpf_attr __user *uattr);
  1237. #endif
  1238. #else
  1239. static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
  1240. {
  1241. return NULL;
  1242. }
  1243. static inline void bpf_struct_ops_init(struct btf *btf,
  1244. struct bpf_verifier_log *log)
  1245. {
  1246. }
  1247. static inline bool bpf_try_module_get(const void *data, struct module *owner)
  1248. {
  1249. return try_module_get(owner);
  1250. }
  1251. static inline void bpf_module_put(const void *data, struct module *owner)
  1252. {
  1253. module_put(owner);
  1254. }
  1255. static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
  1256. void *key,
  1257. void *value)
  1258. {
  1259. return -EINVAL;
  1260. }
  1261. #endif
  1262. #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
  1263. int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
  1264. int cgroup_atype);
  1265. void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
  1266. #else
  1267. static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
  1268. int cgroup_atype)
  1269. {
  1270. return -EOPNOTSUPP;
  1271. }
  1272. static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
  1273. {
  1274. }
  1275. #endif
  1276. struct bpf_array {
  1277. struct bpf_map map;
  1278. u32 elem_size;
  1279. u32 index_mask;
  1280. struct bpf_array_aux *aux;
  1281. union {
  1282. char value[0] __aligned(8);
  1283. void *ptrs[0] __aligned(8);
  1284. void __percpu *pptrs[0] __aligned(8);
  1285. };
  1286. };
  1287. #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
  1288. #define MAX_TAIL_CALL_CNT 33
  1289. /* Maximum number of loops for bpf_loop */
  1290. #define BPF_MAX_LOOPS BIT(23)
  1291. #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
  1292. BPF_F_RDONLY_PROG | \
  1293. BPF_F_WRONLY | \
  1294. BPF_F_WRONLY_PROG)
  1295. #define BPF_MAP_CAN_READ BIT(0)
  1296. #define BPF_MAP_CAN_WRITE BIT(1)
  1297. /* Maximum number of user-producer ring buffer samples that can be drained in
  1298. * a call to bpf_user_ringbuf_drain().
  1299. */
  1300. #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
  1301. static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
  1302. {
  1303. u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
  1304. /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
  1305. * not possible.
  1306. */
  1307. if (access_flags & BPF_F_RDONLY_PROG)
  1308. return BPF_MAP_CAN_READ;
  1309. else if (access_flags & BPF_F_WRONLY_PROG)
  1310. return BPF_MAP_CAN_WRITE;
  1311. else
  1312. return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
  1313. }
  1314. static inline bool bpf_map_flags_access_ok(u32 access_flags)
  1315. {
  1316. return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
  1317. (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
  1318. }
  1319. struct bpf_event_entry {
  1320. struct perf_event *event;
  1321. struct file *perf_file;
  1322. struct file *map_file;
  1323. struct rcu_head rcu;
  1324. };
  1325. static inline bool map_type_contains_progs(struct bpf_map *map)
  1326. {
  1327. return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
  1328. map->map_type == BPF_MAP_TYPE_DEVMAP ||
  1329. map->map_type == BPF_MAP_TYPE_CPUMAP;
  1330. }
  1331. bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
  1332. int bpf_prog_calc_tag(struct bpf_prog *fp);
  1333. const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
  1334. const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
  1335. typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
  1336. unsigned long off, unsigned long len);
  1337. typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
  1338. const struct bpf_insn *src,
  1339. struct bpf_insn *dst,
  1340. struct bpf_prog *prog,
  1341. u32 *target_size);
  1342. u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
  1343. void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
  1344. /* an array of programs to be executed under rcu_lock.
  1345. *
  1346. * Typical usage:
  1347. * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
  1348. *
  1349. * the structure returned by bpf_prog_array_alloc() should be populated
  1350. * with program pointers and the last pointer must be NULL.
  1351. * The user has to keep refcnt on the program and make sure the program
  1352. * is removed from the array before bpf_prog_put().
  1353. * The 'struct bpf_prog_array *' should only be replaced with xchg()
  1354. * since other cpus are walking the array of pointers in parallel.
  1355. */
  1356. struct bpf_prog_array_item {
  1357. struct bpf_prog *prog;
  1358. union {
  1359. struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
  1360. u64 bpf_cookie;
  1361. };
  1362. };
  1363. struct bpf_prog_array {
  1364. struct rcu_head rcu;
  1365. struct bpf_prog_array_item items[];
  1366. };
  1367. struct bpf_empty_prog_array {
  1368. struct bpf_prog_array hdr;
  1369. struct bpf_prog *null_prog;
  1370. };
  1371. /* to avoid allocating empty bpf_prog_array for cgroups that
  1372. * don't have bpf program attached use one global 'bpf_empty_prog_array'
  1373. * It will not be modified the caller of bpf_prog_array_alloc()
  1374. * (since caller requested prog_cnt == 0)
  1375. * that pointer should be 'freed' by bpf_prog_array_free()
  1376. */
  1377. extern struct bpf_empty_prog_array bpf_empty_prog_array;
  1378. struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
  1379. void bpf_prog_array_free(struct bpf_prog_array *progs);
  1380. /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
  1381. void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
  1382. int bpf_prog_array_length(struct bpf_prog_array *progs);
  1383. bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
  1384. int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
  1385. __u32 __user *prog_ids, u32 cnt);
  1386. void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
  1387. struct bpf_prog *old_prog);
  1388. int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
  1389. int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
  1390. struct bpf_prog *prog);
  1391. int bpf_prog_array_copy_info(struct bpf_prog_array *array,
  1392. u32 *prog_ids, u32 request_cnt,
  1393. u32 *prog_cnt);
  1394. int bpf_prog_array_copy(struct bpf_prog_array *old_array,
  1395. struct bpf_prog *exclude_prog,
  1396. struct bpf_prog *include_prog,
  1397. u64 bpf_cookie,
  1398. struct bpf_prog_array **new_array);
  1399. struct bpf_run_ctx {};
  1400. struct bpf_cg_run_ctx {
  1401. struct bpf_run_ctx run_ctx;
  1402. const struct bpf_prog_array_item *prog_item;
  1403. int retval;
  1404. };
  1405. struct bpf_trace_run_ctx {
  1406. struct bpf_run_ctx run_ctx;
  1407. u64 bpf_cookie;
  1408. };
  1409. struct bpf_tramp_run_ctx {
  1410. struct bpf_run_ctx run_ctx;
  1411. u64 bpf_cookie;
  1412. struct bpf_run_ctx *saved_run_ctx;
  1413. };
  1414. static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
  1415. {
  1416. struct bpf_run_ctx *old_ctx = NULL;
  1417. #ifdef CONFIG_BPF_SYSCALL
  1418. old_ctx = current->bpf_ctx;
  1419. current->bpf_ctx = new_ctx;
  1420. #endif
  1421. return old_ctx;
  1422. }
  1423. static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
  1424. {
  1425. #ifdef CONFIG_BPF_SYSCALL
  1426. current->bpf_ctx = old_ctx;
  1427. #endif
  1428. }
  1429. /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
  1430. #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
  1431. /* BPF program asks to set CN on the packet. */
  1432. #define BPF_RET_SET_CN (1 << 0)
  1433. typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
  1434. static __always_inline u32
  1435. bpf_prog_run_array(const struct bpf_prog_array *array,
  1436. const void *ctx, bpf_prog_run_fn run_prog)
  1437. {
  1438. const struct bpf_prog_array_item *item;
  1439. const struct bpf_prog *prog;
  1440. struct bpf_run_ctx *old_run_ctx;
  1441. struct bpf_trace_run_ctx run_ctx;
  1442. u32 ret = 1;
  1443. RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
  1444. if (unlikely(!array))
  1445. return ret;
  1446. migrate_disable();
  1447. old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
  1448. item = &array->items[0];
  1449. while ((prog = READ_ONCE(item->prog))) {
  1450. run_ctx.bpf_cookie = item->bpf_cookie;
  1451. ret &= run_prog(prog, ctx);
  1452. item++;
  1453. }
  1454. bpf_reset_run_ctx(old_run_ctx);
  1455. migrate_enable();
  1456. return ret;
  1457. }
  1458. /* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
  1459. *
  1460. * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
  1461. * overall. As a result, we must use the bpf_prog_array_free_sleepable
  1462. * in order to use the tasks_trace rcu grace period.
  1463. *
  1464. * When a non-sleepable program is inside the array, we take the rcu read
  1465. * section and disable preemption for that program alone, so it can access
  1466. * rcu-protected dynamically sized maps.
  1467. */
  1468. static __always_inline u32
  1469. bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
  1470. const void *ctx, bpf_prog_run_fn run_prog)
  1471. {
  1472. const struct bpf_prog_array_item *item;
  1473. const struct bpf_prog *prog;
  1474. const struct bpf_prog_array *array;
  1475. struct bpf_run_ctx *old_run_ctx;
  1476. struct bpf_trace_run_ctx run_ctx;
  1477. u32 ret = 1;
  1478. might_fault();
  1479. rcu_read_lock_trace();
  1480. migrate_disable();
  1481. array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
  1482. if (unlikely(!array))
  1483. goto out;
  1484. old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
  1485. item = &array->items[0];
  1486. while ((prog = READ_ONCE(item->prog))) {
  1487. if (!prog->aux->sleepable)
  1488. rcu_read_lock();
  1489. run_ctx.bpf_cookie = item->bpf_cookie;
  1490. ret &= run_prog(prog, ctx);
  1491. item++;
  1492. if (!prog->aux->sleepable)
  1493. rcu_read_unlock();
  1494. }
  1495. bpf_reset_run_ctx(old_run_ctx);
  1496. out:
  1497. migrate_enable();
  1498. rcu_read_unlock_trace();
  1499. return ret;
  1500. }
  1501. #ifdef CONFIG_BPF_SYSCALL
  1502. DECLARE_PER_CPU(int, bpf_prog_active);
  1503. extern struct mutex bpf_stats_enabled_mutex;
  1504. /*
  1505. * Block execution of BPF programs attached to instrumentation (perf,
  1506. * kprobes, tracepoints) to prevent deadlocks on map operations as any of
  1507. * these events can happen inside a region which holds a map bucket lock
  1508. * and can deadlock on it.
  1509. */
  1510. static inline void bpf_disable_instrumentation(void)
  1511. {
  1512. migrate_disable();
  1513. this_cpu_inc(bpf_prog_active);
  1514. }
  1515. static inline void bpf_enable_instrumentation(void)
  1516. {
  1517. this_cpu_dec(bpf_prog_active);
  1518. migrate_enable();
  1519. }
  1520. extern const struct file_operations bpf_map_fops;
  1521. extern const struct file_operations bpf_prog_fops;
  1522. extern const struct file_operations bpf_iter_fops;
  1523. #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
  1524. extern const struct bpf_prog_ops _name ## _prog_ops; \
  1525. extern const struct bpf_verifier_ops _name ## _verifier_ops;
  1526. #define BPF_MAP_TYPE(_id, _ops) \
  1527. extern const struct bpf_map_ops _ops;
  1528. #define BPF_LINK_TYPE(_id, _name)
  1529. #include <linux/bpf_types.h>
  1530. #undef BPF_PROG_TYPE
  1531. #undef BPF_MAP_TYPE
  1532. #undef BPF_LINK_TYPE
  1533. extern const struct bpf_prog_ops bpf_offload_prog_ops;
  1534. extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
  1535. extern const struct bpf_verifier_ops xdp_analyzer_ops;
  1536. struct bpf_prog *bpf_prog_get(u32 ufd);
  1537. struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
  1538. bool attach_drv);
  1539. void bpf_prog_add(struct bpf_prog *prog, int i);
  1540. void bpf_prog_sub(struct bpf_prog *prog, int i);
  1541. void bpf_prog_inc(struct bpf_prog *prog);
  1542. struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
  1543. void bpf_prog_put(struct bpf_prog *prog);
  1544. void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
  1545. void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
  1546. struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
  1547. void bpf_map_free_kptr_off_tab(struct bpf_map *map);
  1548. struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
  1549. bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
  1550. void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);
  1551. struct bpf_map *bpf_map_get(u32 ufd);
  1552. struct bpf_map *bpf_map_get_with_uref(u32 ufd);
  1553. struct bpf_map *__bpf_map_get(struct fd f);
  1554. void bpf_map_inc(struct bpf_map *map);
  1555. void bpf_map_inc_with_uref(struct bpf_map *map);
  1556. struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
  1557. void bpf_map_put_with_uref(struct bpf_map *map);
  1558. void bpf_map_put(struct bpf_map *map);
  1559. void *bpf_map_area_alloc(u64 size, int numa_node);
  1560. void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
  1561. void bpf_map_area_free(void *base);
  1562. bool bpf_map_write_active(const struct bpf_map *map);
  1563. void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
  1564. int generic_map_lookup_batch(struct bpf_map *map,
  1565. const union bpf_attr *attr,
  1566. union bpf_attr __user *uattr);
  1567. int generic_map_update_batch(struct bpf_map *map,
  1568. const union bpf_attr *attr,
  1569. union bpf_attr __user *uattr);
  1570. int generic_map_delete_batch(struct bpf_map *map,
  1571. const union bpf_attr *attr,
  1572. union bpf_attr __user *uattr);
  1573. struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
  1574. struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
  1575. #ifdef CONFIG_MEMCG_KMEM
  1576. void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
  1577. int node);
  1578. void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
  1579. void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
  1580. size_t align, gfp_t flags);
  1581. #else
  1582. static inline void *
  1583. bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
  1584. int node)
  1585. {
  1586. return kmalloc_node(size, flags, node);
  1587. }
  1588. static inline void *
  1589. bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
  1590. {
  1591. return kzalloc(size, flags);
  1592. }
  1593. static inline void __percpu *
  1594. bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
  1595. gfp_t flags)
  1596. {
  1597. return __alloc_percpu_gfp(size, align, flags);
  1598. }
  1599. #endif
  1600. extern int sysctl_unprivileged_bpf_disabled;
  1601. static inline bool bpf_allow_ptr_leaks(void)
  1602. {
  1603. return perfmon_capable();
  1604. }
  1605. static inline bool bpf_allow_uninit_stack(void)
  1606. {
  1607. return perfmon_capable();
  1608. }
  1609. static inline bool bpf_allow_ptr_to_map_access(void)
  1610. {
  1611. return perfmon_capable();
  1612. }
  1613. static inline bool bpf_bypass_spec_v1(void)
  1614. {
  1615. return perfmon_capable();
  1616. }
  1617. static inline bool bpf_bypass_spec_v4(void)
  1618. {
  1619. return perfmon_capable();
  1620. }
  1621. int bpf_map_new_fd(struct bpf_map *map, int flags);
  1622. int bpf_prog_new_fd(struct bpf_prog *prog);
  1623. void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
  1624. const struct bpf_link_ops *ops, struct bpf_prog *prog);
  1625. int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
  1626. int bpf_link_settle(struct bpf_link_primer *primer);
  1627. void bpf_link_cleanup(struct bpf_link_primer *primer);
  1628. void bpf_link_inc(struct bpf_link *link);
  1629. void bpf_link_put(struct bpf_link *link);
  1630. int bpf_link_new_fd(struct bpf_link *link);
  1631. struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
  1632. struct bpf_link *bpf_link_get_from_fd(u32 ufd);
  1633. struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
  1634. int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
  1635. int bpf_obj_get_user(const char __user *pathname, int flags);
  1636. #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
  1637. #define DEFINE_BPF_ITER_FUNC(target, args...) \
  1638. extern int bpf_iter_ ## target(args); \
  1639. int __init bpf_iter_ ## target(args) { return 0; }
  1640. /*
  1641. * The task type of iterators.
  1642. *
  1643. * For BPF task iterators, they can be parameterized with various
  1644. * parameters to visit only some of tasks.
  1645. *
  1646. * BPF_TASK_ITER_ALL (default)
  1647. * Iterate over resources of every task.
  1648. *
  1649. * BPF_TASK_ITER_TID
  1650. * Iterate over resources of a task/tid.
  1651. *
  1652. * BPF_TASK_ITER_TGID
  1653. * Iterate over resources of every task of a process / task group.
  1654. */
  1655. enum bpf_iter_task_type {
  1656. BPF_TASK_ITER_ALL = 0,
  1657. BPF_TASK_ITER_TID,
  1658. BPF_TASK_ITER_TGID,
  1659. };
  1660. struct bpf_iter_aux_info {
  1661. /* for map_elem iter */
  1662. struct bpf_map *map;
  1663. /* for cgroup iter */
  1664. struct {
  1665. struct cgroup *start; /* starting cgroup */
  1666. enum bpf_cgroup_iter_order order;
  1667. } cgroup;
  1668. struct {
  1669. enum bpf_iter_task_type type;
  1670. u32 pid;
  1671. } task;
  1672. };
  1673. typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
  1674. union bpf_iter_link_info *linfo,
  1675. struct bpf_iter_aux_info *aux);
  1676. typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
  1677. typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
  1678. struct seq_file *seq);
  1679. typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
  1680. struct bpf_link_info *info);
  1681. typedef const struct bpf_func_proto *
  1682. (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
  1683. const struct bpf_prog *prog);
  1684. enum bpf_iter_feature {
  1685. BPF_ITER_RESCHED = BIT(0),
  1686. };
  1687. #define BPF_ITER_CTX_ARG_MAX 2
  1688. struct bpf_iter_reg {
  1689. const char *target;
  1690. bpf_iter_attach_target_t attach_target;
  1691. bpf_iter_detach_target_t detach_target;
  1692. bpf_iter_show_fdinfo_t show_fdinfo;
  1693. bpf_iter_fill_link_info_t fill_link_info;
  1694. bpf_iter_get_func_proto_t get_func_proto;
  1695. u32 ctx_arg_info_size;
  1696. u32 feature;
  1697. struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
  1698. const struct bpf_iter_seq_info *seq_info;
  1699. };
  1700. struct bpf_iter_meta {
  1701. __bpf_md_ptr(struct seq_file *, seq);
  1702. u64 session_id;
  1703. u64 seq_num;
  1704. };
  1705. struct bpf_iter__bpf_map_elem {
  1706. __bpf_md_ptr(struct bpf_iter_meta *, meta);
  1707. __bpf_md_ptr(struct bpf_map *, map);
  1708. __bpf_md_ptr(void *, key);
  1709. __bpf_md_ptr(void *, value);
  1710. };
  1711. int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
  1712. void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
  1713. bool bpf_iter_prog_supported(struct bpf_prog *prog);
  1714. const struct bpf_func_proto *
  1715. bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
  1716. int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
  1717. int bpf_iter_new_fd(struct bpf_link *link);
  1718. bool bpf_link_is_iter(struct bpf_link *link);
  1719. struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
  1720. int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
  1721. void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
  1722. struct seq_file *seq);
  1723. int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
  1724. struct bpf_link_info *info);
  1725. int map_set_for_each_callback_args(struct bpf_verifier_env *env,
  1726. struct bpf_func_state *caller,
  1727. struct bpf_func_state *callee);
  1728. int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
  1729. int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
  1730. int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
  1731. u64 flags);
  1732. int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
  1733. u64 flags);
  1734. int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
  1735. int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
  1736. void *key, void *value, u64 map_flags);
  1737. int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
  1738. int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
  1739. void *key, void *value, u64 map_flags);
  1740. int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
  1741. int bpf_get_file_flag(int flags);
  1742. int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
  1743. size_t actual_size);
  1744. /* verify correctness of eBPF program */
  1745. int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
  1746. #ifndef CONFIG_BPF_JIT_ALWAYS_ON
  1747. void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
  1748. #endif
  1749. struct btf *bpf_get_btf_vmlinux(void);
  1750. /* Map specifics */
  1751. struct xdp_frame;
  1752. struct sk_buff;
  1753. struct bpf_dtab_netdev;
  1754. struct bpf_cpu_map_entry;
  1755. void __dev_flush(void);
  1756. int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
  1757. struct net_device *dev_rx);
  1758. int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
  1759. struct net_device *dev_rx);
  1760. int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
  1761. struct bpf_map *map, bool exclude_ingress);
  1762. int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
  1763. struct bpf_prog *xdp_prog);
  1764. int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
  1765. struct bpf_prog *xdp_prog, struct bpf_map *map,
  1766. bool exclude_ingress);
  1767. void __cpu_map_flush(void);
  1768. int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
  1769. struct net_device *dev_rx);
  1770. int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
  1771. struct sk_buff *skb);
  1772. /* Return map's numa specified by userspace */
  1773. static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
  1774. {
  1775. return (attr->map_flags & BPF_F_NUMA_NODE) ?
  1776. attr->numa_node : NUMA_NO_NODE;
  1777. }
  1778. struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
  1779. int array_map_alloc_check(union bpf_attr *attr);
  1780. int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
  1781. union bpf_attr __user *uattr);
  1782. int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
  1783. union bpf_attr __user *uattr);
  1784. int bpf_prog_test_run_tracing(struct bpf_prog *prog,
  1785. const union bpf_attr *kattr,
  1786. union bpf_attr __user *uattr);
  1787. int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
  1788. const union bpf_attr *kattr,
  1789. union bpf_attr __user *uattr);
  1790. int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
  1791. const union bpf_attr *kattr,
  1792. union bpf_attr __user *uattr);
  1793. int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
  1794. const union bpf_attr *kattr,
  1795. union bpf_attr __user *uattr);
  1796. bool btf_ctx_access(int off, int size, enum bpf_access_type type,
  1797. const struct bpf_prog *prog,
  1798. struct bpf_insn_access_aux *info);
  1799. static inline bool bpf_tracing_ctx_access(int off, int size,
  1800. enum bpf_access_type type)
  1801. {
  1802. if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
  1803. return false;
  1804. if (type != BPF_READ)
  1805. return false;
  1806. if (off % size != 0)
  1807. return false;
  1808. return true;
  1809. }
  1810. static inline bool bpf_tracing_btf_ctx_access(int off, int size,
  1811. enum bpf_access_type type,
  1812. const struct bpf_prog *prog,
  1813. struct bpf_insn_access_aux *info)
  1814. {
  1815. if (!bpf_tracing_ctx_access(off, size, type))
  1816. return false;
  1817. return btf_ctx_access(off, size, type, prog, info);
  1818. }
  1819. int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
  1820. const struct btf_type *t, int off, int size,
  1821. enum bpf_access_type atype,
  1822. u32 *next_btf_id, enum bpf_type_flag *flag);
  1823. bool btf_struct_ids_match(struct bpf_verifier_log *log,
  1824. const struct btf *btf, u32 id, int off,
  1825. const struct btf *need_btf, u32 need_type_id,
  1826. bool strict);
  1827. int btf_distill_func_proto(struct bpf_verifier_log *log,
  1828. struct btf *btf,
  1829. const struct btf_type *func_proto,
  1830. const char *func_name,
  1831. struct btf_func_model *m);
  1832. struct bpf_kfunc_arg_meta {
  1833. u64 r0_size;
  1834. bool r0_rdonly;
  1835. int ref_obj_id;
  1836. u32 flags;
  1837. };
  1838. struct bpf_reg_state;
  1839. int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
  1840. struct bpf_reg_state *regs);
  1841. int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
  1842. struct bpf_reg_state *regs);
  1843. int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
  1844. const struct btf *btf, u32 func_id,
  1845. struct bpf_reg_state *regs,
  1846. struct bpf_kfunc_arg_meta *meta);
  1847. int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
  1848. struct bpf_reg_state *reg);
  1849. int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
  1850. struct btf *btf, const struct btf_type *t);
  1851. struct bpf_prog *bpf_prog_by_id(u32 id);
  1852. struct bpf_link *bpf_link_by_id(u32 id);
  1853. const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
  1854. void bpf_task_storage_free(struct task_struct *task);
  1855. bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
  1856. const struct btf_func_model *
  1857. bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
  1858. const struct bpf_insn *insn);
  1859. struct bpf_core_ctx {
  1860. struct bpf_verifier_log *log;
  1861. const struct btf *btf;
  1862. };
  1863. int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
  1864. int relo_idx, void *insn);
  1865. static inline bool unprivileged_ebpf_enabled(void)
  1866. {
  1867. return !sysctl_unprivileged_bpf_disabled;
  1868. }
  1869. /* Not all bpf prog type has the bpf_ctx.
  1870. * For the bpf prog type that has initialized the bpf_ctx,
  1871. * this function can be used to decide if a kernel function
  1872. * is called by a bpf program.
  1873. */
  1874. static inline bool has_current_bpf_ctx(void)
  1875. {
  1876. return !!current->bpf_ctx;
  1877. }
  1878. void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
  1879. #else /* !CONFIG_BPF_SYSCALL */
  1880. static inline struct bpf_prog *bpf_prog_get(u32 ufd)
  1881. {
  1882. return ERR_PTR(-EOPNOTSUPP);
  1883. }
  1884. static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
  1885. enum bpf_prog_type type,
  1886. bool attach_drv)
  1887. {
  1888. return ERR_PTR(-EOPNOTSUPP);
  1889. }
  1890. static inline void bpf_prog_add(struct bpf_prog *prog, int i)
  1891. {
  1892. }
  1893. static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
  1894. {
  1895. }
  1896. static inline void bpf_prog_put(struct bpf_prog *prog)
  1897. {
  1898. }
  1899. static inline void bpf_prog_inc(struct bpf_prog *prog)
  1900. {
  1901. }
  1902. static inline struct bpf_prog *__must_check
  1903. bpf_prog_inc_not_zero(struct bpf_prog *prog)
  1904. {
  1905. return ERR_PTR(-EOPNOTSUPP);
  1906. }
  1907. static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
  1908. const struct bpf_link_ops *ops,
  1909. struct bpf_prog *prog)
  1910. {
  1911. }
  1912. static inline int bpf_link_prime(struct bpf_link *link,
  1913. struct bpf_link_primer *primer)
  1914. {
  1915. return -EOPNOTSUPP;
  1916. }
  1917. static inline int bpf_link_settle(struct bpf_link_primer *primer)
  1918. {
  1919. return -EOPNOTSUPP;
  1920. }
  1921. static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
  1922. {
  1923. }
  1924. static inline void bpf_link_inc(struct bpf_link *link)
  1925. {
  1926. }
  1927. static inline void bpf_link_put(struct bpf_link *link)
  1928. {
  1929. }
  1930. static inline int bpf_obj_get_user(const char __user *pathname, int flags)
  1931. {
  1932. return -EOPNOTSUPP;
  1933. }
  1934. static inline void __dev_flush(void)
  1935. {
  1936. }
  1937. struct xdp_frame;
  1938. struct bpf_dtab_netdev;
  1939. struct bpf_cpu_map_entry;
  1940. static inline
  1941. int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
  1942. struct net_device *dev_rx)
  1943. {
  1944. return 0;
  1945. }
  1946. static inline
  1947. int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
  1948. struct net_device *dev_rx)
  1949. {
  1950. return 0;
  1951. }
  1952. static inline
  1953. int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
  1954. struct bpf_map *map, bool exclude_ingress)
  1955. {
  1956. return 0;
  1957. }
  1958. struct sk_buff;
  1959. static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
  1960. struct sk_buff *skb,
  1961. struct bpf_prog *xdp_prog)
  1962. {
  1963. return 0;
  1964. }
  1965. static inline
  1966. int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
  1967. struct bpf_prog *xdp_prog, struct bpf_map *map,
  1968. bool exclude_ingress)
  1969. {
  1970. return 0;
  1971. }
  1972. static inline void __cpu_map_flush(void)
  1973. {
  1974. }
  1975. static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
  1976. struct xdp_frame *xdpf,
  1977. struct net_device *dev_rx)
  1978. {
  1979. return 0;
  1980. }
  1981. static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
  1982. struct sk_buff *skb)
  1983. {
  1984. return -EOPNOTSUPP;
  1985. }
  1986. static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
  1987. enum bpf_prog_type type)
  1988. {
  1989. return ERR_PTR(-EOPNOTSUPP);
  1990. }
  1991. static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
  1992. const union bpf_attr *kattr,
  1993. union bpf_attr __user *uattr)
  1994. {
  1995. return -ENOTSUPP;
  1996. }
  1997. static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
  1998. const union bpf_attr *kattr,
  1999. union bpf_attr __user *uattr)
  2000. {
  2001. return -ENOTSUPP;
  2002. }
  2003. static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
  2004. const union bpf_attr *kattr,
  2005. union bpf_attr __user *uattr)
  2006. {
  2007. return -ENOTSUPP;
  2008. }
  2009. static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
  2010. const union bpf_attr *kattr,
  2011. union bpf_attr __user *uattr)
  2012. {
  2013. return -ENOTSUPP;
  2014. }
  2015. static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
  2016. const union bpf_attr *kattr,
  2017. union bpf_attr __user *uattr)
  2018. {
  2019. return -ENOTSUPP;
  2020. }
  2021. static inline void bpf_map_put(struct bpf_map *map)
  2022. {
  2023. }
  2024. static inline struct bpf_prog *bpf_prog_by_id(u32 id)
  2025. {
  2026. return ERR_PTR(-ENOTSUPP);
  2027. }
  2028. static inline int btf_struct_access(struct bpf_verifier_log *log,
  2029. const struct btf *btf,
  2030. const struct btf_type *t, int off, int size,
  2031. enum bpf_access_type atype,
  2032. u32 *next_btf_id, enum bpf_type_flag *flag)
  2033. {
  2034. return -EACCES;
  2035. }
  2036. static inline const struct bpf_func_proto *
  2037. bpf_base_func_proto(enum bpf_func_id func_id)
  2038. {
  2039. return NULL;
  2040. }
  2041. static inline void bpf_task_storage_free(struct task_struct *task)
  2042. {
  2043. }
  2044. static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
  2045. {
  2046. return false;
  2047. }
  2048. static inline const struct btf_func_model *
  2049. bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
  2050. const struct bpf_insn *insn)
  2051. {
  2052. return NULL;
  2053. }
  2054. static inline bool unprivileged_ebpf_enabled(void)
  2055. {
  2056. return false;
  2057. }
  2058. static inline bool has_current_bpf_ctx(void)
  2059. {
  2060. return false;
  2061. }
  2062. static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
  2063. {
  2064. }
  2065. #endif /* CONFIG_BPF_SYSCALL */
  2066. void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
  2067. struct btf_mod_pair *used_btfs, u32 len);
  2068. static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
  2069. enum bpf_prog_type type)
  2070. {
  2071. return bpf_prog_get_type_dev(ufd, type, false);
  2072. }
  2073. void __bpf_free_used_maps(struct bpf_prog_aux *aux,
  2074. struct bpf_map **used_maps, u32 len);
  2075. bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
  2076. int bpf_prog_offload_compile(struct bpf_prog *prog);
  2077. void bpf_prog_offload_destroy(struct bpf_prog *prog);
  2078. int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
  2079. struct bpf_prog *prog);
  2080. int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
  2081. int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
  2082. int bpf_map_offload_update_elem(struct bpf_map *map,
  2083. void *key, void *value, u64 flags);
  2084. int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
  2085. int bpf_map_offload_get_next_key(struct bpf_map *map,
  2086. void *key, void *next_key);
  2087. bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
  2088. struct bpf_offload_dev *
  2089. bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
  2090. void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
  2091. void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
  2092. int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
  2093. struct net_device *netdev);
  2094. void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
  2095. struct net_device *netdev);
  2096. bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
  2097. void unpriv_ebpf_notify(int new_state);
  2098. #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
  2099. int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
  2100. static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
  2101. {
  2102. return aux->offload_requested;
  2103. }
  2104. static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
  2105. {
  2106. return unlikely(map->ops == &bpf_map_offload_ops);
  2107. }
  2108. struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
  2109. void bpf_map_offload_map_free(struct bpf_map *map);
  2110. int bpf_prog_test_run_syscall(struct bpf_prog *prog,
  2111. const union bpf_attr *kattr,
  2112. union bpf_attr __user *uattr);
  2113. int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
  2114. int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
  2115. int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
  2116. int sock_map_bpf_prog_query(const union bpf_attr *attr,
  2117. union bpf_attr __user *uattr);
  2118. void sock_map_unhash(struct sock *sk);
  2119. void sock_map_destroy(struct sock *sk);
  2120. void sock_map_close(struct sock *sk, long timeout);
  2121. #else
  2122. static inline int bpf_prog_offload_init(struct bpf_prog *prog,
  2123. union bpf_attr *attr)
  2124. {
  2125. return -EOPNOTSUPP;
  2126. }
  2127. static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
  2128. {
  2129. return false;
  2130. }
  2131. static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
  2132. {
  2133. return false;
  2134. }
  2135. static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
  2136. {
  2137. return ERR_PTR(-EOPNOTSUPP);
  2138. }
  2139. static inline void bpf_map_offload_map_free(struct bpf_map *map)
  2140. {
  2141. }
  2142. static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
  2143. const union bpf_attr *kattr,
  2144. union bpf_attr __user *uattr)
  2145. {
  2146. return -ENOTSUPP;
  2147. }
  2148. #ifdef CONFIG_BPF_SYSCALL
  2149. static inline int sock_map_get_from_fd(const union bpf_attr *attr,
  2150. struct bpf_prog *prog)
  2151. {
  2152. return -EINVAL;
  2153. }
  2154. static inline int sock_map_prog_detach(const union bpf_attr *attr,
  2155. enum bpf_prog_type ptype)
  2156. {
  2157. return -EOPNOTSUPP;
  2158. }
  2159. static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
  2160. u64 flags)
  2161. {
  2162. return -EOPNOTSUPP;
  2163. }
  2164. static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
  2165. union bpf_attr __user *uattr)
  2166. {
  2167. return -EINVAL;
  2168. }
  2169. #endif /* CONFIG_BPF_SYSCALL */
  2170. #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
  2171. #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
  2172. void bpf_sk_reuseport_detach(struct sock *sk);
  2173. int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
  2174. void *value);
  2175. int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
  2176. void *value, u64 map_flags);
  2177. #else
  2178. static inline void bpf_sk_reuseport_detach(struct sock *sk)
  2179. {
  2180. }
  2181. #ifdef CONFIG_BPF_SYSCALL
  2182. static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
  2183. void *key, void *value)
  2184. {
  2185. return -EOPNOTSUPP;
  2186. }
  2187. static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
  2188. void *key, void *value,
  2189. u64 map_flags)
  2190. {
  2191. return -EOPNOTSUPP;
  2192. }
  2193. #endif /* CONFIG_BPF_SYSCALL */
  2194. #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
  2195. /* verifier prototypes for helper functions called from eBPF programs */
  2196. extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
  2197. extern const struct bpf_func_proto bpf_map_update_elem_proto;
  2198. extern const struct bpf_func_proto bpf_map_delete_elem_proto;
  2199. extern const struct bpf_func_proto bpf_map_push_elem_proto;
  2200. extern const struct bpf_func_proto bpf_map_pop_elem_proto;
  2201. extern const struct bpf_func_proto bpf_map_peek_elem_proto;
  2202. extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
  2203. extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
  2204. extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
  2205. extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
  2206. extern const struct bpf_func_proto bpf_tail_call_proto;
  2207. extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
  2208. extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
  2209. extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
  2210. extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
  2211. extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
  2212. extern const struct bpf_func_proto bpf_get_current_comm_proto;
  2213. extern const struct bpf_func_proto bpf_get_stackid_proto;
  2214. extern const struct bpf_func_proto bpf_get_stack_proto;
  2215. extern const struct bpf_func_proto bpf_get_task_stack_proto;
  2216. extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
  2217. extern const struct bpf_func_proto bpf_get_stack_proto_pe;
  2218. extern const struct bpf_func_proto bpf_sock_map_update_proto;
  2219. extern const struct bpf_func_proto bpf_sock_hash_update_proto;
  2220. extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
  2221. extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
  2222. extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
  2223. extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
  2224. extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
  2225. extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
  2226. extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
  2227. extern const struct bpf_func_proto bpf_spin_lock_proto;
  2228. extern const struct bpf_func_proto bpf_spin_unlock_proto;
  2229. extern const struct bpf_func_proto bpf_get_local_storage_proto;
  2230. extern const struct bpf_func_proto bpf_strtol_proto;
  2231. extern const struct bpf_func_proto bpf_strtoul_proto;
  2232. extern const struct bpf_func_proto bpf_tcp_sock_proto;
  2233. extern const struct bpf_func_proto bpf_jiffies64_proto;
  2234. extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
  2235. extern const struct bpf_func_proto bpf_event_output_data_proto;
  2236. extern const struct bpf_func_proto bpf_ringbuf_output_proto;
  2237. extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
  2238. extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
  2239. extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
  2240. extern const struct bpf_func_proto bpf_ringbuf_query_proto;
  2241. extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
  2242. extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
  2243. extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
  2244. extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
  2245. extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
  2246. extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
  2247. extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
  2248. extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
  2249. extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
  2250. extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
  2251. extern const struct bpf_func_proto bpf_copy_from_user_proto;
  2252. extern const struct bpf_func_proto bpf_snprintf_btf_proto;
  2253. extern const struct bpf_func_proto bpf_snprintf_proto;
  2254. extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
  2255. extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
  2256. extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
  2257. extern const struct bpf_func_proto bpf_sock_from_file_proto;
  2258. extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
  2259. extern const struct bpf_func_proto bpf_task_storage_get_proto;
  2260. extern const struct bpf_func_proto bpf_task_storage_delete_proto;
  2261. extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
  2262. extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
  2263. extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
  2264. extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
  2265. extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
  2266. extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
  2267. extern const struct bpf_func_proto bpf_find_vma_proto;
  2268. extern const struct bpf_func_proto bpf_loop_proto;
  2269. extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
  2270. extern const struct bpf_func_proto bpf_set_retval_proto;
  2271. extern const struct bpf_func_proto bpf_get_retval_proto;
  2272. extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
  2273. const struct bpf_func_proto *tracing_prog_func_proto(
  2274. enum bpf_func_id func_id, const struct bpf_prog *prog);
  2275. /* Shared helpers among cBPF and eBPF. */
  2276. void bpf_user_rnd_init_once(void);
  2277. u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  2278. u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  2279. #if defined(CONFIG_NET)
  2280. bool bpf_sock_common_is_valid_access(int off, int size,
  2281. enum bpf_access_type type,
  2282. struct bpf_insn_access_aux *info);
  2283. bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
  2284. struct bpf_insn_access_aux *info);
  2285. u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
  2286. const struct bpf_insn *si,
  2287. struct bpf_insn *insn_buf,
  2288. struct bpf_prog *prog,
  2289. u32 *target_size);
  2290. #else
  2291. static inline bool bpf_sock_common_is_valid_access(int off, int size,
  2292. enum bpf_access_type type,
  2293. struct bpf_insn_access_aux *info)
  2294. {
  2295. return false;
  2296. }
  2297. static inline bool bpf_sock_is_valid_access(int off, int size,
  2298. enum bpf_access_type type,
  2299. struct bpf_insn_access_aux *info)
  2300. {
  2301. return false;
  2302. }
  2303. static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
  2304. const struct bpf_insn *si,
  2305. struct bpf_insn *insn_buf,
  2306. struct bpf_prog *prog,
  2307. u32 *target_size)
  2308. {
  2309. return 0;
  2310. }
  2311. #endif
  2312. #ifdef CONFIG_INET
  2313. struct sk_reuseport_kern {
  2314. struct sk_buff *skb;
  2315. struct sock *sk;
  2316. struct sock *selected_sk;
  2317. struct sock *migrating_sk;
  2318. void *data_end;
  2319. u32 hash;
  2320. u32 reuseport_id;
  2321. bool bind_inany;
  2322. };
  2323. bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
  2324. struct bpf_insn_access_aux *info);
  2325. u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
  2326. const struct bpf_insn *si,
  2327. struct bpf_insn *insn_buf,
  2328. struct bpf_prog *prog,
  2329. u32 *target_size);
  2330. bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
  2331. struct bpf_insn_access_aux *info);
  2332. u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
  2333. const struct bpf_insn *si,
  2334. struct bpf_insn *insn_buf,
  2335. struct bpf_prog *prog,
  2336. u32 *target_size);
  2337. #else
  2338. static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
  2339. enum bpf_access_type type,
  2340. struct bpf_insn_access_aux *info)
  2341. {
  2342. return false;
  2343. }
  2344. static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
  2345. const struct bpf_insn *si,
  2346. struct bpf_insn *insn_buf,
  2347. struct bpf_prog *prog,
  2348. u32 *target_size)
  2349. {
  2350. return 0;
  2351. }
  2352. static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
  2353. enum bpf_access_type type,
  2354. struct bpf_insn_access_aux *info)
  2355. {
  2356. return false;
  2357. }
  2358. static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
  2359. const struct bpf_insn *si,
  2360. struct bpf_insn *insn_buf,
  2361. struct bpf_prog *prog,
  2362. u32 *target_size)
  2363. {
  2364. return 0;
  2365. }
  2366. #endif /* CONFIG_INET */
  2367. enum bpf_text_poke_type {
  2368. BPF_MOD_CALL,
  2369. BPF_MOD_JUMP,
  2370. };
  2371. int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
  2372. void *addr1, void *addr2);
  2373. void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
  2374. struct bpf_prog *new, struct bpf_prog *old);
  2375. void *bpf_arch_text_copy(void *dst, void *src, size_t len);
  2376. int bpf_arch_text_invalidate(void *dst, size_t len);
  2377. struct btf_id_set;
  2378. bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
  2379. #define MAX_BPRINTF_VARARGS 12
  2380. int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
  2381. u32 **bin_buf, u32 num_args);
  2382. void bpf_bprintf_cleanup(void);
  2383. /* the implementation of the opaque uapi struct bpf_dynptr */
  2384. struct bpf_dynptr_kern {
  2385. void *data;
  2386. /* Size represents the number of usable bytes of dynptr data.
  2387. * If for example the offset is at 4 for a local dynptr whose data is
  2388. * of type u64, the number of usable bytes is 4.
  2389. *
  2390. * The upper 8 bits are reserved. It is as follows:
  2391. * Bits 0 - 23 = size
  2392. * Bits 24 - 30 = dynptr type
  2393. * Bit 31 = whether dynptr is read-only
  2394. */
  2395. u32 size;
  2396. u32 offset;
  2397. } __aligned(8);
  2398. enum bpf_dynptr_type {
  2399. BPF_DYNPTR_TYPE_INVALID,
  2400. /* Points to memory that is local to the bpf program */
  2401. BPF_DYNPTR_TYPE_LOCAL,
  2402. /* Underlying data is a kernel-produced ringbuf record */
  2403. BPF_DYNPTR_TYPE_RINGBUF,
  2404. };
  2405. void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
  2406. enum bpf_dynptr_type type, u32 offset, u32 size);
  2407. void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
  2408. int bpf_dynptr_check_size(u32 size);
  2409. u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr);
  2410. #ifdef CONFIG_BPF_LSM
  2411. void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
  2412. void bpf_cgroup_atype_put(int cgroup_atype);
  2413. #else
  2414. static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
  2415. static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
  2416. #endif /* CONFIG_BPF_LSM */
  2417. struct key;
  2418. #ifdef CONFIG_KEYS
  2419. struct bpf_key {
  2420. struct key *key;
  2421. bool has_ref;
  2422. };
  2423. #endif /* CONFIG_KEYS */
  2424. #endif /* _LINUX_BPF_H */