/* bounds.c */
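/* The byte loads and JGT checks bound r1 and r3 to [0, 0xff] each, but
 * r1 - r3 may still be negative; after the logical right shift by 56 the
 * verifier still sees a possible offset of up to 0xff into the 8-byte value.
 */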
{
	"subtraction bounds (map value) variant 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
	BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 max value is outside of the allowed memory range",
	.result = REJECT,
},
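/* Same as variant 1, but without the shift: the possibly negative
 * difference is added straight to the map value pointer.
 */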
{
	"subtraction bounds (map value) variant 2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
	BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
	.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
	.result = REJECT,
},
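/* Subtracting a pointer from a pointer yields a scalar that carries a kernel
 * address delta; this is allowed for privileged programs only.
 */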
{
	"check subtraction on pointers for unpriv",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1, 9 },
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R9 pointer -= pointer prohibited",
},
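/* MOV32 zero-extends into the upper 32 bits, so the verifier can prove the
 * shifted value is exactly 0 and the access stays at offset 0.
 */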
{
	"bounds check based on zero-extended MOV",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	/* r2 = 0x0000'0000'ffff'ffff */
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
	/* r2 = 0 */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
	/* no-op */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	/* access at offset 0 */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT
},
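/* MOV64 sign-extends its 32-bit immediate, so 0xffffffff becomes all ones
 * and the shifted value is far outside the 8-byte value.
 */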
{
	"bounds check based on sign-extended MOV. test1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	/* r2 = 0xffff'ffff'ffff'ffff */
	BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
	/* r2 = 0xffff'ffff */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
	/* r0 = <oob pointer> */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	/* access to OOB pointer */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "map_value pointer and 4294967295",
	.result = REJECT
},
{
	"bounds check based on sign-extended MOV. test2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	/* r2 = 0xffff'ffff'ffff'ffff */
	BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
	/* r2 = 0xfff'ffff */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
	/* r0 = <oob pointer> */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	/* access to OOB pointer */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 min value is outside of the allowed memory range",
	.result = REJECT
},
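/* The constant pointer offset, the variable offset and the load
 * instruction's own offset are all accumulated; their sum must still fit
 * inside the 8-byte value.
 */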
{
	"bounds check based on reg_off + var_off + insn_off. test1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "value_size=8 off=1073741825",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"bounds check based on reg_off + var_off + insn_off. test2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "value 1073741823",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
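/* The 32-bit subtraction only operates on the low 32 bits, and since that
 * range does not wrap around 2^32 the verifier can keep exact bounds and
 * prove the final offset is 0.
 */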
{
	"bounds check after truncation of non-boundary-crossing range",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	/* r1 = [0x00, 0xff] */
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	/* r2 = 0x10'0000'0000 */
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
	/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
	/* r1 = [0x00, 0xff] */
	BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
	/* r1 = 0 */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
	/* no-op */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* access at offset 0 */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT
},
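/* Here the low 32 bits of the range do wrap around 2^32, so after
 * truncation the value may lie in either of two disjoint ranges and the
 * later pointer arithmetic must be rejected.
 */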
{
	"bounds check after truncation of boundary-crossing range (1)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	/* r1 = [0x00, 0xff] */
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
	/* r1 = [0xffff'ff80, 0x1'0000'007f] */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
	/* r1 = [0xffff'ff80, 0xffff'ffff] or
	 *      [0x0000'0000, 0x0000'007f]
	 */
	BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
	/* r1 = [0x00, 0xff] or
	 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
	 */
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
	/* error on OOB pointer computation */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	/* not actually fully unbounded, but the bound is very high */
	.errstr = "value -4294967168 makes map_value pointer be out of bounds",
	.result = REJECT,
},
{
	"bounds check after truncation of boundary-crossing range (2)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	/* r1 = [0x00, 0xff] */
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
	/* r1 = [0xffff'ff80, 0x1'0000'007f] */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
	/* r1 = [0xffff'ff80, 0xffff'ffff] or
	 *      [0x0000'0000, 0x0000'007f]
	 * difference to previous test: truncation via MOV32
	 * instead of ALU32.
	 */
	BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
	/* r1 = [0x00, 0xff] or
	 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
	 */
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
	/* error on OOB pointer computation */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "value -4294967168 makes map_value pointer be out of bounds",
	.result = REJECT,
},
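/* The 32-bit addition wraps 0xffff'fffe + 2 back to 0; the verifier must
 * model that wraparound to accept the access at offset 0.
 */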
{
	"bounds check after wrapping 32-bit addition",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	/* r1 = 0x7fff'ffff */
	BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
	/* r1 = 0xffff'fffe */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
	/* r1 = 0 */
	BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
	/* no-op */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* access at offset 0 */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT
},
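/* A 32-bit shift by 32 does not have a well-defined result, so r1 must be
 * treated as unknown; even masked with 0xffff it can exceed the 8-byte value.
 */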
{
	"bounds check after shift with oversized count operand",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_IMM(BPF_REG_2, 32),
	BPF_MOV64_IMM(BPF_REG_1, 1),
	/* r1 = (u32)1 << (u32)32 = ? */
	BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
	/* r1 = [0x0000, 0xffff] */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
	/* computes unknown pointer, potentially OOB */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* potentially OOB access */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 max value is outside of the allowed memory range",
	.result = REJECT
},
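/* r1 may be -1 before the shift; a logical right shift of a negative value
 * produces a huge unsigned number, so the offset cannot be bounded.
 */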
{
	"bounds check after right shift of maybe-negative number",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* r1 = [0x00, 0xff] */
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	/* r1 = [-0x01, 0xfe] */
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
	/* r1 = 0 or 0xff'ffff'ffff'ffff */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
	/* r1 = 0 or 0xffff'ffff'ffff */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
	/* computes unknown pointer, potentially OOB */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* potentially OOB access */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 unbounded memory access",
	.result = REJECT
},
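/* ALU32 ops read and write only the low 32 bits: bit 32 set by the left
 * shift is discarded, so the later subtraction underflows to 0xffff'fffe.
 */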
{
	"bounds check after 32-bit right shift with 64-bit input",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* r1 = 2 */
	BPF_MOV64_IMM(BPF_REG_1, 2),
	/* r1 = 1<<32 */
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
	/* r1 = 0 (NOT 2!) */
	BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
	/* r1 = 0xffff'fffe (NOT 0!) */
	BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
	/* error on computing OOB pointer */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	/* exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "math between map_value pointer and 4294967294 is not allowed",
	.result = REJECT,
},
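/* The following tests pile up constant offsets so large that off + access
 * size would overflow 32-bit signed arithmetic; the pointer arithmetic must
 * be rejected before any access happens.
 */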
{
	"bounds check map access with off+size signed 32bit overflow. test1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_JMP_A(0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "map_value pointer and 2147483646",
	.result = REJECT
},
{
	"bounds check map access with off+size signed 32bit overflow. test2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_JMP_A(0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "pointer offset 1073741822",
	.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
	.result = REJECT
},
{
	"bounds check map access with off+size signed 32bit overflow. test3",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
	BPF_JMP_A(0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "pointer offset -1073741822",
	.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
	.result = REJECT
},
{
	"bounds check map access with off+size signed 32bit overflow. test4",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_1, 1000000),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
	BPF_JMP_A(0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "map_value pointer and 1000000000000",
	.result = REJECT
},
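/* These two are accepted only if the verifier keeps precise 32-bit bounds
 * across the ALU ops, proving the branch to the invalid load dead.
 */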
{
	"bounds check mixed 32bit and 64bit arithmetic. test1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_1, -1),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	/* r1 = 0xffffFFFF00000001 */
	BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3),
	/* check ALU64 op keeps 32bit bounds */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_JMP_A(1),
	/* invalid ldx if bounds are lost above */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 invalid mem access 'scalar'",
	.result_unpriv = REJECT,
	.result = ACCEPT
},
{
	"bounds check mixed 32bit and 64bit arithmetic. test2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_1, -1),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	/* r1 = 0xffffFFFF00000001 */
	BPF_MOV64_IMM(BPF_REG_2, 3),
	/* r1 = 0x2 */
	BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
	/* check ALU32 op zero extends 64bit bounds */
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1),
	BPF_JMP_A(1),
	/* invalid ldx if bounds are lost above */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 invalid mem access 'scalar'",
	.result_unpriv = REJECT,
	.result = ACCEPT
},
{
	"assigning 32bit bounds to 64bit for wA = 0, wB = wA",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV32_IMM(BPF_REG_9, 0),
	BPF_MOV32_REG(BPF_REG_2, BPF_REG_9),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
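/* XOR bounds tracking: the out-of-bounds load at offset 8 is only dead if
 * the verifier can still bound the register after the XOR; with a fully
 * unknown operand it cannot, and the load must be rejected.
 */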
{
	"bounds check for reg = 0, reg xor 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
	.result_unpriv = REJECT,
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
},
{
	"bounds check for reg32 = 0, reg32 xor 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 1),
	BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
	.result_unpriv = REJECT,
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
},
{
	"bounds check for reg = 2, reg xor 3",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_1, 2),
	BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
	.result_unpriv = REJECT,
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
},
{
	"bounds check for reg = any, reg xor 3",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = REJECT,
	.errstr = "invalid access to map value",
	.errstr_unpriv = "invalid access to map value",
},
{
	"bounds check for reg32 = any, reg32 xor 3",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3),
	BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = REJECT,
	.errstr = "invalid access to map value",
	.errstr_unpriv = "invalid access to map value",
},
{
	"bounds check for reg > 0, reg xor 3",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 0, 3),
	BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
	.result_unpriv = REJECT,
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
},
{
	"bounds check for reg32 > 0, reg32 xor 3",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP32_IMM(BPF_JLE, BPF_REG_1, 0, 3),
	BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3),
	BPF_JMP32_IMM(BPF_JGE, BPF_REG_1, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
	.result_unpriv = REJECT,
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
},
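/* Regression tests: bounds derived from a 32-bit load must survive the
 * mix of 64-bit and 32-bit conditional jumps that follows.
 */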
{
	"bounds checks after 32-bit truncation. test 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	/* This used to reduce the max bound to 0x7fffffff */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"bounds checks after 32-bit truncation. test 2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1),
	BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},