// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/btf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_EXPECTED_INSNS	32
#define MAX_UNEXPECTED_INSNS	32
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	23
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64
#define MAX_FUNC_INFOS	8
#define MAX_BTF_STRINGS	256
#define MAX_BTF_TYPES	256

#define INSN_OFF_MASK	((__s16)0xFFFF)
#define INSN_IMM_MASK	((__s32)0xFFFFFFFF)
#define SKIP_INSNS()	BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)

#define DEFAULT_LIBBPF_LOG_LEVEL	4
#define VERBOSE_LIBBPF_LOG_LEVEL	1

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN |	\
		    1ULL << CAP_PERFMON |	\
		    1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"

static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;

struct kfunc_btf_id_pair {
	const char *kfunc;
	int insn_idx;
};

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	struct bpf_insn *fill_insns;
	/* If specified, the test engine looks for this sequence of
	 * instructions in the BPF program after loading. This allows
	 * testing rewrites applied by the verifier. Use the values
	 * INSN_OFF_MASK and INSN_IMM_MASK to mask the `off` and `imm`
	 * fields if their content does not matter. The test case fails
	 * if the specified instructions are not found.
	 *
	 * The sequence can be split into sub-sequences by adding a
	 * SKIP_INSNS instruction at the end of each sub-sequence. In
	 * that case the sub-sequences are searched for one after another.
	 */
	struct bpf_insn expected_insns[MAX_EXPECTED_INSNS];
	/* If specified, the test engine applies the same pattern-matching
	 * logic as for `expected_insns`. If the specified pattern is
	 * matched, the test case is marked as failed.
	 */
	struct bpf_insn unexpected_insns[MAX_UNEXPECTED_INSNS];
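	/* Illustration only (not part of any real test in this file): a
	 * pattern such as
	 *
	 *	.expected_insns = {
	 *		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	 *		SKIP_INSNS(),
	 *		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0,
	 *			     INSN_OFF_MASK, INSN_IMM_MASK),
	 *	},
	 *
	 * matches the MOV anywhere in the xlated program and then, at any
	 * later position, a call instruction with arbitrary off/imm fields.
	 */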
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	int fixup_map_event_output[MAX_FIXUPS];
	int fixup_map_reuseport_array[MAX_FIXUPS];
	int fixup_map_ringbuf[MAX_FIXUPS];
	int fixup_map_timer[MAX_FIXUPS];
	int fixup_map_kptr[MAX_FIXUPS];
	struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
	 * Can be a tab-separated sequence of expected strings. An empty string
	 * means no log verification.
	 */
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT,
		VERBOSE_ACCEPT,
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	int runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
	const char *kfunc;
	struct bpf_func_info func_info[MAX_FUNC_INFOS];
	int func_info_cnt;
	char btf_strings[MAX_BTF_STRINGS];
	/* A set of BTF types to load when specified;
	 * use the macro definitions from test_btf.h.
	 * The list must end with BTF_END_RAW.
	 */
	__u32 btf_types[MAX_BTF_TYPES];
};

/* Note we want this to be 64-bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
	 * to extend the error value of the inlined ld_abs sequence which then
	 * contains 7 insns. so, set the dividend to 7 so the testcase could
	 * work on all arches.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

/* test the sequence of 8k jumps in the innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

static void bpf_fill_scale(struct bpf_test *self)
{
	switch (self->retval) {
	case 1:
		return bpf_fill_scale1(self);
	case 2:
		return bpf_fill_scale2(self);
	default:
		self->prog_len = 0;
		break;
	}
}

static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
	unsigned int len = 259, hlen = 128;
	int i;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= hlen; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
		insn[i + hlen] = BPF_JMP_A(hlen - i);
	}
	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insn[len - 1] = BPF_EXIT_INSN();
	return len;
}

static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
	unsigned int len = 4100, jmp_off = 2048;
	int i, j;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= jmp_off; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
	}
	insn[i++] = BPF_JMP_A(jmp_off);
	for (; i <= jmp_off * 2 + 1; i += 16) {
		for (j = 0; j < 16; j++) {
			insn[i + j] = BPF_JMP_A(16 - j - 1);
		}
	}
	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
	insn[len - 1] = BPF_EXIT_INSN();
	return len;
}

static void bpf_fill_torturous_jumps(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0;

	switch (self->retval) {
	case 1:
		self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
		return;
	case 2:
		self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
		return;
	case 3:
		/* main */
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 262);
		insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
		insn[i++] = BPF_EXIT_INSN();

		/* subprog 1 */
		i += bpf_fill_torturous_jumps_insn_1(insn + i);

		/* subprog 2 */
		i += bpf_fill_torturous_jumps_insn_2(insn + i);

		self->prog_len = i;
		return;
	default:
		self->prog_len = 0;
		break;
	}
}

static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* This test was added to catch a specific use-after-free
	 * error, which happened upon BPF program reallocation.
	 * Reallocation is handled by core.c:bpf_prog_realloc, which
	 * reuses old memory if the page boundary is not crossed. The
	 * value of `len` is chosen to cross this boundary on bpf_loop
	 * patching.
	 */
	const int len = getpagesize() - 25;
	int callback_load_idx;
	int callback_idx;
	int i = 0;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1);
	callback_load_idx = i;
	insn[i++] = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW,
				 BPF_REG_2, BPF_PSEUDO_FUNC, 0,
				 777 /* filled below */);
	insn[i++] = BPF_RAW_INSN(0, 0, 0, 0, 0);
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0);
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0);
	insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop);

	while (i < len - 3)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
	insn[i++] = BPF_EXIT_INSN();

	callback_idx = i;
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
	insn[i++] = BPF_EXIT_INSN();

	insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1;
	self->func_info[1].insn_off = callback_idx;
	self->prog_len = i;
	assert(i == len);
}

/* BPF_SK_LOOKUP expands to 13 instructions; keep this count in mind if you
 * need to fix up map instruction indices.
 */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)

/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

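/* A sketch of how the helper macros above are used (hypothetical test entry,
 * not one from this suite): each macro expands to a comma-separated
 * instruction list and is spliced directly into a test's .insns array, e.g.
 *
 *	.insns = {
 *		BPF_SK_LOOKUP(sk_lookup_tcp),
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	},
 *
 * so a macro occupies its stated number of instruction slots, which matters
 * when computing fixup_* instruction indices.
 */
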
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

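/* Infer the length of a statically initialized insns[] array by scanning
 * backwards for the last instruction that is not all zeroes; the zeroed
 * tail comes from partial array initialization in the test definitions.
 */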
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}

static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int fd;

	opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
	fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create map '%s'!\n", strerror(errno));
	}

	return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}

static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
				  int idx, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key, int p2key, int p3key)
{
	int mfd, p1fd, p2fd, p3fd;

	mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
			     sizeof(int), max_elem, NULL);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy_simple(prog_type, 42);
	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
	p3fd = create_prog_dummy_simple(prog_type, 24);
	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
		close(mfd);
		mfd = -1;
	}
	close(p3fd);
	close(p2fd);
	close(p1fd);
	return mfd;
}

static int create_map_in_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
				      sizeof(int), 1, NULL);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	opts.inner_map_fd = inner_map_fd;
	outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
				      sizeof(int), sizeof(int), 1, &opts);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, NULL);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}

/* struct bpf_spin_lock {
 *	int val;
 * };
 * struct val {
 *	int cnt;
 *	struct bpf_spin_lock l;
 * };
 * struct bpf_timer {
 *	__u64 :64;
 *	__u64 :64;
 * } __attribute__((aligned(8)));
 * struct timer {
 *	struct bpf_timer t;
 * };
 * struct btf_ptr {
 *	struct prog_test_ref_kfunc __kptr *ptr;
 *	struct prog_test_ref_kfunc __kptr_ref *ptr;
 *	struct prog_test_member __kptr_ref *ptr;
 * }
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
				  "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
				  "\0prog_test_member";

static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
	/* struct bpf_spin_lock */			/* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */				/* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	/* struct bpf_timer */				/* [4] */
	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
	/* struct timer */				/* [5] */
	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
	BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
	/* struct prog_test_ref_kfunc */		/* [6] */
	BTF_STRUCT_ENC(51, 0, 0),
	BTF_STRUCT_ENC(89, 0, 0),			/* [7] */
	/* type tag "kptr" */
	BTF_TYPE_TAG_ENC(75, 6),			/* [8] */
	/* type tag "kptr_ref" */
	BTF_TYPE_TAG_ENC(80, 6),			/* [9] */
	BTF_TYPE_TAG_ENC(80, 7),			/* [10] */
	BTF_PTR_ENC(8),					/* [11] */
	BTF_PTR_ENC(9),					/* [12] */
	BTF_PTR_ENC(10),				/* [13] */
	/* struct btf_ptr */				/* [14] */
	BTF_STRUCT_ENC(43, 3, 24),
	BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
	BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
	BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};

static char bpf_vlog[UINT_MAX >> 8];

static int load_btf_spec(__u32 *types, int types_len,
			 const char *strings, int strings_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = strings_len,
	};
	void *ptr, *raw_btf;
	int btf_fd;
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		    .log_buf = bpf_vlog,
		    .log_size = sizeof(bpf_vlog),
		    .log_level = (verbose
				  ? VERBOSE_LIBBPF_LOG_LEVEL
				  : DEFAULT_LIBBPF_LOG_LEVEL),
	);

	raw_btf = malloc(sizeof(hdr) + types_len + strings_len);
	if (!raw_btf) {
		perror("malloc for raw BTF");
		return -1;
	}

	ptr = raw_btf;
	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, strings, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts);
	if (btf_fd < 0)
		printf("Failed to load BTF spec: '%s'\n", strerror(errno));

	free(raw_btf);

	return btf_fd < 0 ? -1 : btf_fd;
}

static int load_btf(void)
{
	return load_btf_spec(btf_raw_types, sizeof(btf_raw_types),
			     btf_str_sec, sizeof(btf_str_sec));
}

static int load_btf_for_test(struct bpf_test *test)
{
	int types_num = 0;

	while (types_num < MAX_BTF_TYPES &&
	       test->btf_types[types_num] != BTF_END_RAW)
		++types_num;

	int types_len = types_num * sizeof(test->btf_types[0]);

	return load_btf_spec(test->btf_types, types_len,
			     test->btf_strings, sizeof(test->btf_strings));
}

static int create_map_spin_lock(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .btf_key_type_id = 1,
		    .btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}

static int create_sk_storage_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .map_flags = BPF_F_NO_PREALLOC,
		    .btf_key_type_id = 1,
		    .btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
	close(opts.btf_fd);
	if (fd < 0)
		printf("Failed to create sk_storage_map\n");
	return fd;
}

static int create_map_timer(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .btf_key_type_id = 1,
		    .btf_value_type_id = 5,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with timer\n");
	return fd;
}

static int create_map_kptr(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .btf_key_type_id = 1,
		    .btf_value_type_id = 14,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with btf_id pointer\n");
	return fd;
}

static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;
	int *fixup_map_event_output = test->fixup_map_event_output;
	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
	int *fixup_map_timer = test->fixup_map_timer;
	int *fixup_map_kptr = test->fixup_map_kptr;
	struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only exercise
	 * the verifier and do not perform a runtime lookup, so the only
	 * thing that really matters is the value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
	if (*fixup_map_event_output) {
		map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
					   sizeof(int), sizeof(int), 1, 0);
		do {
			prog[*fixup_map_event_output].imm = map_fds[18];
			fixup_map_event_output++;
		} while (*fixup_map_event_output);
	}
	if (*fixup_map_reuseport_array) {
		map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
					   sizeof(u32), sizeof(u64), 1, 0);
		do {
			prog[*fixup_map_reuseport_array].imm = map_fds[19];
			fixup_map_reuseport_array++;
		} while (*fixup_map_reuseport_array);
	}
	if (*fixup_map_ringbuf) {
		map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
					 0, 4096);
		do {
			prog[*fixup_map_ringbuf].imm = map_fds[20];
			fixup_map_ringbuf++;
		} while (*fixup_map_ringbuf);
	}
	if (*fixup_map_timer) {
		map_fds[21] = create_map_timer();
		do {
			prog[*fixup_map_timer].imm = map_fds[21];
			fixup_map_timer++;
		} while (*fixup_map_timer);
	}
	if (*fixup_map_kptr) {
		map_fds[22] = create_map_kptr();
		do {
			prog[*fixup_map_kptr].imm = map_fds[22];
			fixup_map_kptr++;
		} while (*fixup_map_kptr);
	}

	/* Patch in kfunc BTF IDs */
	if (fixup_kfunc_btf_id->kfunc) {
		struct btf *btf;
		int btf_id;

		do {
			btf_id = 0;
			btf = btf__load_vmlinux_btf();
			if (btf) {
				btf_id = btf__find_by_name_kind(btf,
								fixup_kfunc_btf_id->kfunc,
								BTF_KIND_FUNC);
				btf_id = btf_id < 0 ? 0 : btf_id;
			}
			btf__free(btf);
			prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
			fixup_kfunc_btf_id++;
		} while (fixup_kfunc_btf_id->kfunc);
	}
}

struct libcap {
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[2];
};

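/* Toggle the effective capability set between the full ADMIN_CAPS
 * (CAP_NET_ADMIN | CAP_PERFMON | CAP_BPF) and none of them, so a test can
 * be executed as if the caller were privileged or unprivileged.
 */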
static int set_admin(bool admin)
{
	int err;

	if (admin) {
		err = cap_enable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_enable_effective(ADMIN_CAPS)");
	} else {
		err = cap_disable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_disable_effective(ADMIN_CAPS)");
	}
	return err;
}

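/* Run the loaded program once via BPF_PROG_TEST_RUN and compare its return
 * value against expected_val. An expected_val of POINTER_VALUE disables the
 * comparison (used by tests whose return value is a pointer and therefore
 * not predictable).
 */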
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = data,
		    .data_size_in = size_data,
		    .data_out = tmp,
		    .data_size_out = size_tmp,
		    .repeat = 1,
	);

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;
	if (unpriv)
		set_admin(false);
	if (err) {
		switch (saved_errno) {
		case ENOTSUPP:
			printf("Did not run the program (not supported) ");
			return 0;
		case EPERM:
			if (unpriv) {
				printf("Did not run the program (no permission) ");
				return 0;
			}
			/* fallthrough; */
		default:
			printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
			       strerror(saved_errno));
			return err;
		}
	}

	if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", topts.retval, expected_val);
		return 1;
	}

	return 0;
}

/* Returns true if every part of exp (tab-separated) appears in log, in order.
 *
 * If exp is an empty string, returns true.
 */
static bool cmp_str_seq(const char *log, const char *exp)
{
	char needle[200];
	const char *p, *q;
	int len;

	do {
		if (!strlen(exp))
			break;
		p = strchr(exp, '\t');
		if (!p)
			p = exp + strlen(exp);

		len = p - exp;
		if (len >= sizeof(needle) || !len) {
			printf("FAIL\nTestcase bug\n");
			return false;
		}
		strncpy(needle, exp, len);
		needle[len] = 0;
		q = strstr(log, needle);
		if (!q) {
			printf("FAIL\nUnexpected verifier log!\n"
			       "EXP: %s\nRES:\n", needle);
			return false;
		}
		log = q + len;
		exp = p + 1;
	} while (*p);
	return true;
}

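/* For example (hypothetical strings, not taken from a real test case):
 *
 *	cmp_str_seq(log, "invalid stack off\tprocessed 12 insns");
 *
 * succeeds only if "invalid stack off" occurs somewhere in the log and
 * "processed 12 insns" occurs at some later position.
 */
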
static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 xlated_prog_len;
	__u32 buf_element_size = sizeof(struct bpf_insn);

	if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
		perror("bpf_obj_get_info_by_fd failed");
		return -1;
	}

	xlated_prog_len = info.xlated_prog_len;
	if (xlated_prog_len % buf_element_size) {
		printf("Program length %d is not multiple of %d\n",
		       xlated_prog_len, buf_element_size);
		return -1;
	}

	*cnt = xlated_prog_len / buf_element_size;
	*buf = calloc(*cnt, buf_element_size);
	if (!*buf) {
		perror("can't allocate xlated program buffer");
		return -ENOMEM;
	}

	bzero(&info, sizeof(info));
	info.xlated_prog_len = xlated_prog_len;
	info.xlated_prog_insns = (__u64)(unsigned long)*buf;
	if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
		perror("second bpf_obj_get_info_by_fd failed");
		goto out_free_buf;
	}

	return 0;

out_free_buf:
	free(*buf);
	return -1;
}

static bool is_null_insn(struct bpf_insn *insn)
{
	struct bpf_insn null_insn = {};

	return memcmp(insn, &null_insn, sizeof(null_insn)) == 0;
}

static bool is_skip_insn(struct bpf_insn *insn)
{
	struct bpf_insn skip_insn = SKIP_INSNS();

	return memcmp(insn, &skip_insn, sizeof(skip_insn)) == 0;
}

static int null_terminated_insn_len(struct bpf_insn *seq, int max_len)
{
	int i;

	for (i = 0; i < max_len; ++i) {
		if (is_null_insn(&seq[i]))
			return i;
	}
	return max_len;
}

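/* Compare a program instruction against an expected-pattern instruction,
 * treating INSN_IMM_MASK and INSN_OFF_MASK in the pattern as wildcards for
 * the imm and off fields; e.g. a pattern instruction of
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, INSN_OFF_MASK, INSN_IMM_MASK)
 * matches any helper call regardless of its off/imm contents.
 */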
static bool compare_masked_insn(struct bpf_insn *orig, struct bpf_insn *masked)
{
	struct bpf_insn orig_masked;

	memcpy(&orig_masked, orig, sizeof(orig_masked));
	if (masked->imm == INSN_IMM_MASK)
		orig_masked.imm = INSN_IMM_MASK;
	if (masked->off == INSN_OFF_MASK)
		orig_masked.off = INSN_OFF_MASK;

	return memcmp(&orig_masked, masked, sizeof(orig_masked)) == 0;
}

static int find_insn_subseq(struct bpf_insn *seq, struct bpf_insn *subseq,
			    int seq_len, int subseq_len)
{
	int i, j;

	if (subseq_len > seq_len)
		return -1;

	for (i = 0; i < seq_len - subseq_len + 1; ++i) {
		bool found = true;

		for (j = 0; j < subseq_len; ++j) {
			if (!compare_masked_insn(&seq[i + j], &subseq[j])) {
				found = false;
				break;
			}
		}
		if (found)
			return i;
	}

	return -1;
}

static int find_skip_insn_marker(struct bpf_insn *seq, int len)
{
	int i;

	for (i = 0; i < len; ++i)
		if (is_skip_insn(&seq[i]))
			return i;

	return -1;
}

/* Return true if all sub-sequences in `subseqs` can be found in `seq`
 * one after another. Sub-sequences are separated by a single SKIP_INSNS()
 * marker; the `subseqs` array as a whole is terminated by a nil (all-zero)
 * instruction.
 */
static bool find_all_insn_subseqs(struct bpf_insn *seq, struct bpf_insn *subseqs,
				  int seq_len, int max_subseqs_len)
{
	int subseqs_len = null_terminated_insn_len(subseqs, max_subseqs_len);

	while (subseqs_len > 0) {
		int skip_idx = find_skip_insn_marker(subseqs, subseqs_len);
		int cur_subseq_len = skip_idx < 0 ? subseqs_len : skip_idx;
		int subseq_idx = find_insn_subseq(seq, subseqs,
						  seq_len, cur_subseq_len);

		if (subseq_idx < 0)
			return false;
		seq += subseq_idx + cur_subseq_len;
		seq_len -= subseq_idx + cur_subseq_len;
		subseqs += cur_subseq_len + 1;
		subseqs_len -= cur_subseq_len + 1;
	}

	return true;
}

static void print_insn(struct bpf_insn *buf, int cnt)
{
	int i;

	printf("  addr  op d s off  imm\n");
	for (i = 0; i < cnt; ++i) {
		struct bpf_insn *insn = &buf[i];

		if (is_null_insn(insn))
			break;

		if (is_skip_insn(insn))
			printf("  ...\n");
		else
			printf("  %04x: %02x  %1x %x %04hx %08x\n",
			       i, insn->code, insn->dst_reg,
			       insn->src_reg, insn->off, insn->imm);
	}
}

static bool check_xlated_program(struct bpf_test *test, int fd_prog)
{
	struct bpf_insn *buf;
	int cnt;
	bool result = true;
	bool check_expected = !is_null_insn(test->expected_insns);
	bool check_unexpected = !is_null_insn(test->unexpected_insns);

	if (!check_expected && !check_unexpected)
		goto out;

	if (get_xlated_program(fd_prog, &buf, &cnt)) {
		printf("FAIL: can't get xlated program\n");
		result = false;
		goto out;
	}

	if (check_expected &&
	    !find_all_insn_subseqs(buf, test->expected_insns,
				   cnt, MAX_EXPECTED_INSNS)) {
		printf("FAIL: can't find expected subsequence of instructions\n");
		result = false;
		if (verbose) {
			printf("Program:\n");
			print_insn(buf, cnt);
			printf("Expected subsequence:\n");
			print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
		}
	}

	if (check_unexpected &&
	    find_all_insn_subseqs(buf, test->unexpected_insns,
				  cnt, MAX_UNEXPECTED_INSNS)) {
		printf("FAIL: found unexpected subsequence of instructions\n");
		result = false;
		if (verbose) {
			printf("Program:\n");
			print_insn(buf, cnt);
			printf("Unexpected subsequence:\n");
			print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
		}
	}

	free(buf);
out:
	return result;
}

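/* Load (and, when applicable, run) a single test case, either with full
 * capabilities or, when unpriv is true, with the reduced capability set,
 * updating the pass/error counters accordingly.
 */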
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, btf_fd, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int saved_errno;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	fd_prog = -1;
	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;
	btf_fd = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	if (test->flags & ~3)
		pflags |= test->flags;

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	opts.expected_attach_type = test->expected_attach_type;
	if (verbose)
		opts.log_level = VERBOSE_LIBBPF_LOG_LEVEL;
	else if (expected_ret == VERBOSE_ACCEPT)
		opts.log_level = 2;
	else
		opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
	opts.prog_flags = pflags;

	if ((prog_type == BPF_PROG_TYPE_TRACING ||
	     prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
		int attach_btf_id;

		attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
						opts.expected_attach_type);
		if (attach_btf_id < 0) {
			printf("FAIL\nFailed to find BTF ID for '%s'!\n",
			       test->kfunc);
			(*errors)++;
			return;
		}

		opts.attach_btf_id = attach_btf_id;
	}

	if (test->btf_types[0] != 0) {
		btf_fd = load_btf_for_test(test);
		if (btf_fd < 0)
			goto fail_log;
		opts.prog_btf_fd = btf_fd;
	}

	if (test->func_info_cnt != 0) {
		opts.func_info = test->func_info;
		opts.func_info_cnt = test->func_info_cnt;
		opts.func_info_rec_size = sizeof(test->func_info[0]);
	}

	opts.log_buf = bpf_vlog;
	opts.log_size = sizeof(bpf_vlog);
	fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
	saved_errno = errno;

	/* BPF_PROG_TYPE_TRACING requires more setup and
	 * libbpf_probe_bpf_prog_type won't give a correct answer for it.
	 */
	if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
	    !libbpf_probe_bpf_prog_type(prog_type, NULL)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	if (fd_prog < 0 && saved_errno == ENOTSUPP) {
		printf("SKIP (program uses an unsupported feature)\n");
		skips++;
		goto close_fds;
	}

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(saved_errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
		if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (!unpriv && test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	if (verbose)
		printf(", verifier log:\n%s", bpf_vlog);

	if (!check_xlated_program(test, fd_prog))
		goto fail_log;

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}

close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	close(btf_fd);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	__u64 caps;

	/* The test checks for the finer-grained capabilities CAP_NET_ADMIN,
	 * CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
	 * Thus, disable CAP_SYS_ADMIN at the beginning.
	 */
	if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
		perror("cap_disable_effective(CAP_SYS_ADMIN)");
		return false;
	}

	return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}

static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Some architectures have strict alignment requirements. In
	 * that case, the BPF verifier detects if a program has
	 * unaligned accesses and rejects them. A user can pass
	 * BPF_F_ANY_ALIGNMENT to a program to override this
	 * check. That, however, will only work when a privileged user
	 * loads a program. An unprivileged user loading a program
	 * with this flag will be rejected prior to entering the
	 * verifier.
	 */
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		return false;
#endif
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported for non-root users
		 * are skipped right away.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();
	int arg = 1;

	if (argc > 1 && strcmp(argv[1], "-v") == 0) {
		arg++;
		verbose = true;
		argc--;
	}

	if (argc == 3) {
		unsigned int l = atoi(argv[arg]);
		unsigned int u = atoi(argv[arg + 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[arg]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}