/* calls.c */

{
	"calls: invalid kfunc call not eliminated",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = REJECT,
	.errstr = "invalid kernel function call not eliminated in verifier pass",
},
{
	"calls: invalid kfunc call unreachable",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail1", 2 },
	},
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail2", 2 },
	},
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with FAM",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail3", 2 },
	},
},
{
	"calls: invalid kfunc call: reg->type != PTR_TO_CTX",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 expected pointer to ctx, but got PTR",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_pass_ctx", 2 },
	},
},
{
	"calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type UNKNOWN must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
	},
},
{
	"calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_release", 5 },
	},
},
{
	"calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
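	/* make reg->off non-zero before handing the object to the release kfunc */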
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R1 must have zero offset when passed to release func",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_memb_release", 8 },
	},
},
{
	"calls: invalid kfunc call: don't match first member type when passed to release kfunc",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_memb_acquire", 1 },
		{ "bpf_kfunc_call_memb1_release", 5 },
	},
},
{
	"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
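	/* move the pointer back by 4 bytes so off=-4 at the release call */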
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_release", 9 },
	},
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
},
{
	"calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_release", 9 },
		{ "bpf_kfunc_call_test_release", 13 },
		{ "bpf_kfunc_call_test_release", 17 },
	},
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
},
{
	"calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_ref", 8 },
		{ "bpf_kfunc_call_test_ref", 10 },
	},
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "R1 must be referenced",
},
{
	"calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_ref", 8 },
		{ "bpf_kfunc_call_test_release", 10 },
	},
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"calls: basic sanity",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: not on unprivileged",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: div by 0 in subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
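	/* subprog: divides by zero, then returns skb->data */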
	BPF_MOV32_IMM(BPF_REG_2, 0),
	BPF_MOV32_IMM(BPF_REG_3, 1),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: multiple ret types in subprog 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
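	/* subprog: returns either a packet pointer or the scalar 42 */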
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
{
	"calls: multiple ret types in subprog 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
		    offsetof(struct __sk_buff, data)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 16 },
	.result = REJECT,
	.errstr = "R0 min value is outside of the allowed memory range",
},
{
	"calls: overlapping caller/callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn is not an exit or jmp",
	.result = REJECT,
},
{
	"calls: wrong recursive calls",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: wrong src reg",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "BPF_CALL uses reserved fields",
	.result = REJECT,
},
{
	"calls: wrong off value",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "BPF_CALL uses reserved fields",
	.result = REJECT,
},
{
	"calls: jump back loop",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "back-edge from insn 0 to 0",
	.result = REJECT,
},
{
	"calls: conditional call",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: conditional call 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: conditional call 3",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "back-edge from insn",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: conditional call 4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -5),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: conditional call 5",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: conditional call 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "infinite loop detected",
	.result = REJECT,
},
{
	"calls: using r0 returned by callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: using uninit r0 from callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"calls: callee is using r1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN,
},
{
	"calls: callee using args1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"calls: callee using wrong args2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
{
	"calls: callee using two args",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
		    offsetof(struct __sk_buff, len)),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
		    offsetof(struct __sk_buff, len)),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
},
{
	"calls: callee changing pkt pointers",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* clear_all_pkt_pointers() has to walk all frames
	 * to make sure that pkt pointers in the caller
	 * are cleared when callee is calling a helper that
	 * adjusts packet size
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R6 invalid mem access 'scalar'",
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: ptr null check in subprog",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.fixup_map_hash_48b = { 3 },
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 0,
},
{
	"calls: two calls with args",
	.insns = {
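	/* main prog */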
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
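	/* subprog 1 */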
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
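	/* subprog 2 */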
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
},
{
	"calls: calls with stack arith",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: calls with misaligned stack access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	.errstr = "misaligned stack access",
	.result = REJECT,
},
{
	"calls: calls control flow, jump test",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 43),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 43,
},
{
	"calls: calls control flow, jump test 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 43),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "jump out of range from insn 1 to 4",
	.result = REJECT,
},
{
	"calls: two calls with bad jump",
	.insns = {
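	/* main prog */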
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
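	/* subprog 1 */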
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
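	/* subprog 2: ends with a jump back into subprog 1 */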
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range from insn 11 to 9",
	.result = REJECT,
},
{
	"calls: recursive call. test1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "back-edge",
	.result = REJECT,
},
{
	"calls: recursive call. test2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "back-edge",
	.result = REJECT,
},
{
	"calls: unreachable code",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "unreachable insn 6",
	.result = REJECT,
},
{
	"calls: invalid call",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "invalid destination",
	.result = REJECT,
},
{
	"calls: invalid call 2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "invalid destination",
	.result = REJECT,
},
{
	"calls: jumping across function bodies. test1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: jumping across function bodies. test2",
	.insns = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: call without exit",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "not an exit",
	.result = REJECT,
},
{
	"calls: call into middle of ld_imm64",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn",
	.result = REJECT,
},
{
	"calls: call into middle of other call",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn",
	.result = REJECT,
},
{
	"calls: subprog call with ld_abs in main prog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_LD_ABS(BPF_B, 0),
	BPF_LD_ABS(BPF_H, 0),
	BPF_LD_ABS(BPF_W, 0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
	BPF_LD_ABS(BPF_B, 0),
	BPF_LD_ABS(BPF_H, 0),
	BPF_LD_ABS(BPF_W, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"calls: two calls with bad fallthrough",
	.insns = {
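	/* main prog */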
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
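	/* subprog 1 */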
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
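	/* subprog 2 (subprog 1 falls through without an exit) */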
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "not an exit",
	.result = REJECT,
},
{
	"calls: two calls with stack read",
	.insns = {
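	/* main prog */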
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
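	/* subprog 1 */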
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
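	/* subprog 2: reads from the stack slot of the main prog */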
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: two calls with stack write",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
	/* write into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* read from stack frame of main prog */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: stack overflow using two frames (pre-call access)",
	.insns = {
	/* prog 1 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack overflow using two frames (post-call access)",
	.insns = {
	/* prog 1 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *   func1(0);
	 *   func1(1);
	 *   func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   } else {
	 *     func2(alloc_or_recurse);
	 *   }
	 * }
	 * void func2(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   }
	 * }
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: stack depth check in dead code",
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: spill into caller stack frame",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
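	/* subprog: spilling a stack pointer into the caller's frame is rejected */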
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	"calls: write into caller stack frame",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
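	/* subprog: writes 42 through the caller's fp-8 pointer */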
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
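	/* subprog: returning a pointer into its own stack frame is rejected */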
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
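	/* bug under test: unlike the test above, this check falls through
	 * when R0 == 0, i.e. when subprog 2 did NOT write a map_value_ptr
	 * into fp-16, so the load below reads an uninitialized stack slot
	 */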
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "invalid read from stack R7 off=-16 size=8",
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
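	/* same shape as test1, but the "subprograms" are entered via
	 * conditional jumps and left via BPF_JA instead of bpf-to-bpf
	 * calls, so the whole program is verified as a single frame
	 */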
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
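	/* bug under test: the flag check below is inverted relative to
	 * test1, so the pointer stored at fp-16 is dereferenced on the
	 * path where the lookup returned NULL (r9 == 0); there the
	 * verifier already knows the spilled value is zero, hence the
	 * "invalid mem access 'scalar'" error
	 */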
	/* if arg4 == 0 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
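/* In the "pkt_ptr spill" tests below the caller passes a pointer to its own
 * stack slot in r4; subprog 1 spills a packet pointer through it, and the
 * verifier must decide whether the pkt range check done in the callee still
 * covers the spilled pointer once control returns to the caller's frame.
 */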
{
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
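/* Tests 5-9 vary what the caller pre-stores in the shared stack slot (ctx
 * pointer, data_end, scalar zero, a checked pkt_ptr) and whether subprog 1
 * spills before or after its range check; only test 8 leaves a pointer that
 * is safe to dereference in the slot on every path.
 */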
{
	"calls: pkt_ptr spill into caller stack 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
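	/* depending on whether subprog 1 took its spill path, fp-8 now
	 * holds the original ctx pointer or a pkt_ptr, so the load below
	 * would be a ctx access on one path and a packet access on the
	 * other
	 */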
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "same insn cannot be used with different",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 8",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 9",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: caller stack init to zero or map_value_or_null",
	.insns = {
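	/* fp-8 is zero-initialized here; subprog 1 may overwrite it with
	 * the raw result of a map lookup, so after the call the slot holds
	 * either constant zero or map_value_or_null, and both are safe to
	 * test against 0 before the store below
	 */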
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* fetch map_value_or_null or const_zero from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* store into map_value */
	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* if (ctx == 0) return; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
	/* else bpf_map_lookup() and *(fp - 8) = r0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 13 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 bytes */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of the JGT insn will be verified second and it skips
	 * the zero init of the fp-8 stack slot. If stack liveness
	 * marking is missing live_read marks from processing the
	 * map_lookup call, then pruning will incorrectly assume
	 * that the fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly.
	 */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 6 },
	.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: ctx read at start of subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
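	/* self-compare on the subprog's return value; JSGT r0, r0 can
	 * never be taken at runtime, presumably it is here to give the
	 * verifier an extra branch point to explore
	 */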
	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"calls: cross frame pruning",
	.insns = {
	/* r8 = !!random();
	 * call pruner()
	 * if (r8)
	 *     do something bad;
	 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"calls: cross frame pruning - liveness propagation",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},