// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. */

/*
 * Some functions in this program are taken from
 * Linux kernel samples/bpf/xdpsock* and modified
 * for use.
 *
 * See test_xsk.sh for detailed information on test topology
 * and prerequisite network setup.
 *
 * This test program contains two threads, each running a single socket with
 * a unique UMEM. It validates in-order packet delivery and packet content
 * by sending packets to each other.
 *
 * Tests Information:
 * ------------------
 * These selftests test AF_XDP SKB and Native/DRV modes using veth
 * Virtual Ethernet interfaces.
 *
 * For each mode, the following tests are run:
 * a. nopoll - soft-irq processing in run-to-completion mode
 * b. poll - using poll() syscall
 * c. Socket Teardown
 *    Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
 *    both sockets, then repeat multiple times. Only nopoll mode is used
 * d. Bi-directional sockets
 *    Configure sockets as bi-directional tx/rx sockets, sets up fill and
 *    completion rings on each socket, tx/rx in both directions. Only nopoll
 *    mode is used
 * e. Statistics
 *    Trigger some error conditions and ensure that the appropriate statistics
 *    are incremented. Within this test, the following statistics are tested:
 *    i.   rx dropped
 *         Increase the UMEM frame headroom to a value which results in
 *         insufficient space in the rx buffer for both the packet and the headroom.
 *    ii.  tx invalid
 *         Set the 'len' field of tx descriptors to an invalid value (umem frame
 *         size + 1).
 *    iii. rx ring full
 *         Reduce the size of the RX ring to a fraction of the fill ring size.
 *    iv.  fill queue empty
 *         Do not populate the fill queue and then try to receive pkts.
 * f. bpf_link resource persistence
 *    Configure sockets at indexes 0 and 1, run traffic on queue ids 0,
 *    then remove xsk sockets from queue 0 on both veth interfaces and
 *    finally run traffic on queue ids 1
 * g. unaligned mode
 * h. tests for invalid and corner case Tx descriptors so that the correct ones
 *    are discarded and let through, respectively.
 * i. 2K frame size tests
 *
 * Total tests: 12
 *
 * Flow:
 * -----
 * - Single process spawns two threads: Tx and Rx
 * - Each of these two threads attach to a veth interface within their assigned
 *   namespaces
 * - Each thread creates one AF_XDP socket connected to a unique umem for each
 *   veth interface
 * - Tx thread transmits 10k packets from veth<xxxx> to veth<yyyy>
 * - Rx thread verifies if all 10k packets were received and delivered in-order,
 *   and have the right content
 *
 * Enable/disable packet dump mode:
 * --------------------------------
 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
 * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <errno.h>
#include <getopt.h>
#include <asm/barrier.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <time.h>
#include <unistd.h>
#include <stdatomic.h>

#include "xsk.h"
#include "xskxceiver.h"
#include <bpf/bpf.h>
#include <linux/filter.h>
#include "../kselftest.h"

/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf.
 * Until xskxceiver is either moved or rewritten to use libxdp, suppress
 * deprecation warnings in this file
 */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
static const char *IP1 = "192.168.100.162";
static const char *IP2 = "192.168.100.161";
static const u16 UDP_PORT1 = 2020;
static const u16 UDP_PORT2 = 2121;

static void __exit_with_error(int error, const char *file, const char *func, int line)
{
	ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
			      strerror(error));
	ksft_exit_xfail();
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
#define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""

static char *mode_string(struct test_spec *test)
{
	switch (test->mode) {
	case TEST_MODE_SKB:
		return "SKB";
	case TEST_MODE_DRV:
		return "DRV";
	case TEST_MODE_ZC:
		return "ZC";
	default:
		return "BOGUS";
	}
}

static void report_failure(struct test_spec *test)
{
	if (test->fail)
		return;

	ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
			      test->name);
	test->fail = true;
}
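
/* Fill the first (size & ~0x3) bytes of dest with the 32-bit value val in
 * network byte order. Used below to stamp each packet's payload with its
 * sequence number; any trailing bytes beyond a multiple of 4 are left as-is.
 */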
static void memset32_htonl(void *dest, u32 val, u32 size)
{
	u32 *ptr = (u32 *)dest;
	int i;

	val = htonl(val);
	for (i = 0; i < (size & (~0x3)); i += 4)
		ptr[i >> 2] = val;
}

/*
 * Fold a partial checksum
 * This function code has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static __u16 csum_fold(__u32 csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __u16)~sum;
}
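
/* Worked example (illustrative values only): csum_fold(0x1a2b3c4d) first
 * folds to 0x3c4d + 0x1a2b = 0x5678, the second fold leaves it unchanged
 * as there is no carry, and the one's complement gives 0xa987.
 */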

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __u32)from64to32(s);
}

/*
 * This function has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
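
/* Sum the UDP header and payload as 16-bit words (len is in bytes; an odd
 * len would read one byte past the last word, which the packet sizes used
 * in this test are expected to avoid), then mix in the IPv4 pseudo-header
 * via csum_tcpudp_magic() to produce the final checksum.
 */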
static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
{
	u32 csum = 0;
	u32 cnt = 0;

	/* udp hdr and data */
	for (; cnt < len; cnt += 2)
		csum += udp_pkt[cnt >> 1];

	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
}

static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
{
	memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
	memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_IP);
}

static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr)
{
	ip_hdr->version = IP_PKT_VER;
	ip_hdr->ihl = 0x5;
	ip_hdr->tos = IP_PKT_TOS;
	ip_hdr->tot_len = htons(IP_PKT_SIZE);
	ip_hdr->id = 0;
	ip_hdr->frag_off = 0;
	ip_hdr->ttl = IPDEFTTL;
	ip_hdr->protocol = IPPROTO_UDP;
	ip_hdr->saddr = ifobject->src_ip;
	ip_hdr->daddr = ifobject->dst_ip;
	ip_hdr->check = 0;
}

static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
			struct udphdr *udp_hdr)
{
	udp_hdr->source = htons(ifobject->src_port);
	udp_hdr->dest = htons(ifobject->dst_port);
	udp_hdr->len = htons(UDP_PKT_SIZE);
	memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
}

static bool is_umem_valid(struct ifobject *ifobj)
{
	return !!ifobj->umem->umem;
}

static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
{
	udp_hdr->check = 0;
	udp_hdr->check =
		udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr);
}

static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = umem->frame_size,
		.frame_headroom = umem->frame_headroom,
		.flags = XSK_UMEM__DEFAULT_FLAGS
	};
	int ret;

	if (umem->unaligned_mode)
		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	ret = xsk_umem__create(&umem->umem, buffer, size,
			       &umem->fq, &umem->cq, &cfg);
	if (ret)
		return ret;

	umem->buffer = buffer;
	return 0;
}

static void enable_busy_poll(struct xsk_socket_info *xsk)
{
	int sock_opt;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = BATCH_SIZE;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);
}
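
/* Create an AF_XDP socket on queue 0 of the interface. Rx and Tx rings are
 * only requested when the ifobject is configured for that direction, libbpf's
 * automatic XDP program load is inhibited, and XDP_SHARED_UMEM is ORed into
 * the bind flags when several sockets share one UMEM.
 */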
static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
				  struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;

	xsk->umem = umem;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	cfg.xdp_flags = ifobject->xdp_flags;
	cfg.bind_flags = ifobject->bind_flags;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;

	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg);
}

static bool ifobj_zc_avail(struct ifobject *ifobject)
{
	size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	struct xsk_socket_info *xsk;
	struct xsk_umem_info *umem;
	bool zc_avail = false;
	void *bufs;
	int ret;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);

	umem = calloc(1, sizeof(struct xsk_umem_info));
	if (!umem) {
		munmap(bufs, umem_sz);
		exit_with_error(ENOMEM);
	}
	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
	ret = xsk_configure_umem(umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);

	xsk = calloc(1, sizeof(struct xsk_socket_info));
	if (!xsk)
		goto out;
	ifobject->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	ifobject->xdp_flags |= XDP_FLAGS_DRV_MODE;
	ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
	ifobject->rx_on = true;
	xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	ret = __xsk_configure_socket(xsk, umem, ifobject, false);
	if (!ret)
		zc_avail = true;

	xsk_socket__delete(xsk->xsk);
	free(xsk);
out:
	munmap(umem->buffer, umem_sz);
	xsk_umem__delete(umem->umem);
	free(umem);
	return zc_avail;
}

static struct option long_options[] = {
	{"interface", required_argument, 0, 'i'},
	{"busy-poll", no_argument, 0, 'b'},
	{"dump-pkts", no_argument, 0, 'D'},
	{"verbose", no_argument, 0, 'v'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -i, --interface      Use interface\n"
		"  -D, --dump-pkts      Dump packets L2 - L5\n"
		"  -v, --verbose        Verbose output\n"
		"  -b, --busy-poll      Enable busy poll\n";

	ksft_print_msg(str, prog);
}

static int switch_namespace(const char *nsname)
{
	char fqns[26] = "/var/run/netns/";
	int nsfd;

	if (!nsname || strlen(nsname) == 0)
		return -1;

	strncat(fqns, nsname, sizeof(fqns) - strlen(fqns) - 1);
	nsfd = open(fqns, O_RDONLY);

	if (nsfd == -1)
		exit_with_error(errno);

	if (setns(nsfd, 0) == -1)
		exit_with_error(errno);

	print_verbose("NS switched: %s\n", nsname);

	return nsfd;
}

static bool validate_interface(struct ifobject *ifobj)
{
	if (!strcmp(ifobj->ifname, ""))
		return false;
	return true;
}

static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
			       char **argv)
{
	struct ifobject *ifobj;
	u32 interface_nb = 0;
	int option_index, c;

	opterr = 0;

	for (;;) {
		char *sptr, *token;

		c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'i':
			if (interface_nb == 0)
				ifobj = ifobj_tx;
			else if (interface_nb == 1)
				ifobj = ifobj_rx;
			else
				break;

			sptr = strndupa(optarg, strlen(optarg));
			memcpy(ifobj->ifname, strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS);
			token = strsep(&sptr, ",");
			if (token)
				memcpy(ifobj->nsname, token, MAX_INTERFACES_NAMESPACE_CHARS);
			interface_nb++;
			break;
		case 'D':
			opt_pkt_dump = true;
			break;
		case 'v':
			opt_verbose = true;
			break;
		case 'b':
			ifobj_tx->busy_poll = true;
			ifobj_rx->busy_poll = true;
			break;
		default:
			usage(basename(argv[0]));
			ksft_exit_xfail();
		}
	}
}

static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			     struct ifobject *ifobj_rx)
{
	u32 i, j;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->xsk = &ifobj->xsk_arr[0];
		ifobj->use_poll = false;
		ifobj->use_fill_ring = true;
		ifobj->release_rx = true;
		ifobj->validation_func = NULL;

		if (i == 0) {
			ifobj->rx_on = false;
			ifobj->tx_on = true;
			ifobj->pkt_stream = test->tx_pkt_stream_default;
		} else {
			ifobj->rx_on = true;
			ifobj->tx_on = false;
			ifobj->pkt_stream = test->rx_pkt_stream_default;
		}

		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		if (ifobj->shared_umem && ifobj->rx_on)
			ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS *
				XSK_UMEM__DEFAULT_FRAME_SIZE;

		for (j = 0; j < MAX_SOCKETS; j++) {
			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		}
	}

	test->ifobj_tx = ifobj_tx;
	test->ifobj_rx = ifobj_rx;
	test->current_step = 0;
	test->total_steps = 1;
	test->nb_sockets = 1;
	test->fail = false;
}

static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			   struct ifobject *ifobj_rx, enum test_mode mode)
{
	struct pkt_stream *tx_pkt_stream;
	struct pkt_stream *rx_pkt_stream;
	u32 i;

	tx_pkt_stream = test->tx_pkt_stream_default;
	rx_pkt_stream = test->rx_pkt_stream_default;
	memset(test, 0, sizeof(*test));
	test->tx_pkt_stream_default = tx_pkt_stream;
	test->rx_pkt_stream_default = rx_pkt_stream;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
		if (mode == TEST_MODE_SKB)
			ifobj->xdp_flags |= XDP_FLAGS_SKB_MODE;
		else
			ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE;

		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
		if (mode == TEST_MODE_ZC)
			ifobj->bind_flags |= XDP_ZEROCOPY;
		else
			ifobj->bind_flags |= XDP_COPY;
	}

	test->mode = mode;
	__test_spec_init(test, ifobj_tx, ifobj_rx);
}

static void test_spec_reset(struct test_spec *test)
{
	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}

static void test_spec_set_name(struct test_spec *test, const char *name)
{
	strncpy(test->name, name, MAX_TEST_NAME_SIZE);
}

static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
	if (pkt_stream)
		pkt_stream->rx_pkt_nb = 0;
}

static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
{
	if (pkt_nb >= pkt_stream->nb_pkts)
		return NULL;

	return &pkt_stream->pkts[pkt_nb];
}
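
/* Return the next valid packet the receiver should expect, skipping over
 * packets marked invalid. Every packet that is passed over, valid or not,
 * is counted in *pkts_sent so that the pacing accounting in receive_pkts()
 * stays in sync with the Tx side.
 */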
static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
		(*pkts_sent)++;
		if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid)
			return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];
		pkt_stream->rx_pkt_nb++;
	}
	return NULL;
}

static void pkt_stream_delete(struct pkt_stream *pkt_stream)
{
	free(pkt_stream->pkts);
	free(pkt_stream);
}

static void pkt_stream_restore_default(struct test_spec *test)
{
	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;

	if (tx_pkt_stream != test->tx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_tx->pkt_stream);
		test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
	}

	if (rx_pkt_stream != test->rx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_rx->pkt_stream);
		test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
	}
}

static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = calloc(1, sizeof(*pkt_stream));
	if (!pkt_stream)
		return NULL;

	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
	if (!pkt_stream->pkts) {
		free(pkt_stream);
		return NULL;
	}

	pkt_stream->nb_pkts = nb_pkts;
	return pkt_stream;
}
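
/* A packet is only valid if it fits in the usable part of a frame: the frame
 * size minus XDP_PACKET_HEADROOM, the configured frame headroom, and some
 * slack (MIN_PKT_SIZE * 2). Oversized packets stay in the stream but are
 * flagged invalid, as the kernel is expected to discard them.
 */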
static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
{
	pkt->addr = addr + umem->base_addr;
	pkt->len = len;
	if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom)
		pkt->valid = false;
	else
		pkt->valid = true;
}

static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = __pkt_stream_alloc(nb_pkts);
	if (!pkt_stream)
		exit_with_error(ENOMEM);

	for (i = 0; i < nb_pkts; i++) {
		pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
			pkt_len);
		pkt_stream->pkts[i].payload = i;
	}

	return pkt_stream;
}

static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
					   struct pkt_stream *pkt_stream)
{
	return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}

static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
	test->ifobj_tx->pkt_stream = pkt_stream;
	pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
	test->ifobj_rx->pkt_stream = pkt_stream;
}

static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
				      int offset)
{
	struct xsk_umem_info *umem = ifobj->umem;
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
	for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
		pkt_set(umem, &pkt_stream->pkts[i],
			(i % umem->num_frames) * umem->frame_size + offset, pkt_len);

	ifobj->pkt_stream = pkt_stream;
}

static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	__pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
	__pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}

static void pkt_stream_receive_half(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_rx->umem;
	struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
	u32 i;

	test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
							 pkt_stream->pkts[0].len);
	pkt_stream = test->ifobj_rx->pkt_stream;
	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
		pkt_stream->pkts[i].valid = false;
}

static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
{
	struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
	struct udphdr *udp_hdr;
	struct ethhdr *eth_hdr;
	struct iphdr *ip_hdr;
	void *data;

	if (!pkt)
		return NULL;
	if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
		return pkt;

	data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
	udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr));
	ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr));
	eth_hdr = (struct ethhdr *)data;

	gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr);
	gen_ip_hdr(ifobject, ip_hdr);
	gen_udp_csum(udp_hdr, ip_hdr);
	gen_eth_hdr(ifobject, eth_hdr);

	return pkt;
}

static void __pkt_stream_generate_custom(struct ifobject *ifobj,
					 struct pkt *pkts, u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = __pkt_stream_alloc(nb_pkts);
	if (!pkt_stream)
		exit_with_error(ENOMEM);

	for (i = 0; i < nb_pkts; i++) {
		pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr;
		pkt_stream->pkts[i].len = pkts[i].len;
		pkt_stream->pkts[i].payload = i;
		pkt_stream->pkts[i].valid = pkts[i].valid;
	}

	ifobj->pkt_stream = pkt_stream;
}

static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	__pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts);
	__pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts);
}

static void pkt_dump(void *pkt, u32 len)
{
	char s[INET_ADDRSTRLEN];
	struct ethhdr *ethhdr;
	struct udphdr *udphdr;
	struct iphdr *iphdr;
	u32 payload, i;

	ethhdr = pkt;
	iphdr = pkt + sizeof(*ethhdr);
	udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr);

	/* extract L2 frame */
	fprintf(stdout, "DEBUG>> L2: dst mac: ");
	for (i = 0; i < ETH_ALEN; i++)
		fprintf(stdout, "%02X", ethhdr->h_dest[i]);

	fprintf(stdout, "\nDEBUG>> L2: src mac: ");
	for (i = 0; i < ETH_ALEN; i++)
		fprintf(stdout, "%02X", ethhdr->h_source[i]);

	/* extract L3 frame */
	fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
	fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
		inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
	fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
		inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));

	/* extract L4 frame */
	fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
	fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));

	/* extract L5 frame */
	payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));

	fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
	fprintf(stdout, "---------------------------------------\n");
}
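
/* Verify that the kernel placed the packet where we expect it: the offset of
 * the Rx address within its frame should equal the fill address offset (zero
 * unless the fill ring was populated with packet addresses) plus the frame
 * headroom plus XDP_PACKET_HEADROOM, modulo the frame size.
 */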
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
			      u64 pkt_stream_addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size, expected_offset = 0;

	if (!pkt_stream->use_addr_for_fill)
		pkt_stream_addr = 0;

	expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;

	if (offset == expected_offset)
		return true;

	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}

static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));

	if (!pkt) {
		ksft_print_msg("[%s] too many packets received\n", __func__);
		return false;
	}

	if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) {
		/* Do not try to verify packets that are smaller than minimum size. */
		return true;
	}

	if (pkt->len != len) {
		ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
			       __func__, pkt->len, len);
		return false;
	}

	if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) {
		u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));

		if (opt_pkt_dump)
			pkt_dump(data, PKT_SIZE);

		if (pkt->payload != seqnum) {
			ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
				       __func__, pkt->payload, seqnum);
			return false;
		}
	} else {
		ksft_print_msg("Invalid frame received: ");
		ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version,
			       iphdr->tos);
		return false;
	}

	return true;
}

static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0)
		return;
	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
		usleep(100);
		return;
	}
	exit_with_error(errno);
}

static void kick_rx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
	if (ret < 0)
		exit_with_error(errno);
}
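
/* Drain up to batch_size entries from the completion queue, waking the kernel
 * first if it asked for it, and decrement outstanding_tx accordingly. Seeing
 * more completions than packets outstanding is reported as a failure.
 */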
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
	unsigned int rcvd;
	u32 idx;

	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd) {
		if (rcvd > xsk->outstanding_tx) {
			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);

			ksft_print_msg("[%s] Too many packets completed\n", __func__);
			ksft_print_msg("Last completion address: %llx\n", addr);
			return TEST_FAILURE;
		}

		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}

	return TEST_PASS;
}

static int receive_pkts(struct test_spec *test, struct pollfd *fds)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
	u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
	struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
	struct ifobject *ifobj = test->ifobj_rx;
	struct xsk_umem_info *umem = xsk->umem;
	struct pkt *pkt;
	int ret;

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		exit_with_error(errno);
	timeradd(&tv_now, &tv_timeout, &tv_end);

	pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
	while (pkt) {
		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			exit_with_error(errno);
		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
			return TEST_FAILURE;
		}

		kick_rx(xsk);
		if (ifobj->use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (ret < 0)
				exit_with_error(errno);

			if (!ret) {
				if (!is_umem_valid(test->ifobj_tx))
					return TEST_PASS;

				ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
				return TEST_FAILURE;
			}

			if (!(fds->revents & POLLIN))
				continue;
		}

		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
		if (!rcvd)
			continue;

		if (ifobj->use_fill_ring) {
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
			while (ret != rcvd) {
				if (ret < 0)
					exit_with_error(-ret);

				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
					ret = poll(fds, 1, POLL_TMOUT);
					if (ret < 0)
						exit_with_error(errno);
				}

				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
			}
		}

		for (i = 0; i < rcvd; i++) {
			const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
			u64 addr = desc->addr, orig;

			orig = xsk_umem__extract_addr(addr);
			addr = xsk_umem__add_offset_to_addr(addr);

			if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) ||
			    !is_offset_correct(umem, pkt_stream, addr, pkt->addr))
				return TEST_FAILURE;

			if (ifobj->use_fill_ring)
				*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
		}

		if (ifobj->use_fill_ring)
			xsk_ring_prod__submit(&umem->fq, rcvd);
		if (ifobj->release_rx)
			xsk_ring_cons__release(&xsk->rx, rcvd);

		pthread_mutex_lock(&pacing_mutex);
		pkts_in_flight -= pkts_sent;
		if (pkts_in_flight < umem->num_frames)
			pthread_cond_signal(&pacing_cond);
		pthread_mutex_unlock(&pacing_mutex);
		pkts_sent = 0;
	}

	return TEST_PASS;
}

static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
		       bool timeout)
{
	struct xsk_socket_info *xsk = ifobject->xsk;
	bool use_poll = ifobject->use_poll;
	u32 i, idx = 0, valid_pkts = 0;
	int ret;

	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
		if (use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}

		complete_pkts(xsk, BATCH_SIZE);
	}

	for (i = 0; i < BATCH_SIZE; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
		struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);

		if (!pkt)
			break;

		tx_desc->addr = pkt->addr;
		tx_desc->len = pkt->len;
		(*pkt_nb)++;
		if (pkt->valid)
			valid_pkts++;
	}

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
		kick_tx(xsk);
		pthread_cond_wait(&pacing_cond, &pacing_mutex);
	}
	pthread_mutex_unlock(&pacing_mutex);

	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_pkts;

	if (use_poll) {
		ret = poll(fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}

	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;

		usleep(10);
		return TEST_PASS;
	}

	return TEST_CONTINUE;
}

static void wait_for_tx_completion(struct xsk_socket_info *xsk)
{
	while (xsk->outstanding_tx)
		complete_pkts(xsk, BATCH_SIZE);
}

static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
	bool timeout = !is_umem_valid(test->ifobj_rx);
	struct pollfd fds = { };
	u32 pkt_cnt = 0;
	int ret;

	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLOUT;

	while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
		ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
		if ((ret || test->fail) && !timeout)
			return TEST_FAILURE;
		else if (ret == TEST_PASS && timeout)
			return ret;
	}

	wait_for_tx_completion(ifobject->xsk);
	return TEST_PASS;
}

static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
{
	int fd = xsk_socket__fd(xsk), err;
	socklen_t optlen, expected_len;

	optlen = sizeof(*stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, -err, strerror(-err));
		return TEST_FAILURE;
	}

	expected_len = sizeof(struct xdp_statistics);
	if (optlen != expected_len) {
		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
			       __func__, expected_len, optlen);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int validate_rx_dropped(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	kick_rx(ifobject->xsk);

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	/* The receiver calls getsockopt after receiving the last (valid)
	 * packet which is not the final packet sent in this test (valid and
	 * invalid packets are sent in alternating fashion with the final
	 * packet being invalid). Since the last packet may or may not have
	 * been dropped already, both outcomes must be allowed.
	 */
	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
	    stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_rx_full(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	kick_rx(ifobject->xsk);

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_ring_full)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_fill_empty(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	kick_rx(ifobject->xsk);

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_fill_ring_empty_descs)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_tx_invalid_descs(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	int fd = xsk_socket__fd(xsk);
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, -err, strerror(-err));
		return TEST_FAILURE;
	}

	if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
			       __func__, stats.tx_invalid_descs,
			       ifobject->pkt_stream->nb_pkts / 2);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
				 struct xsk_umem_info *umem, bool tx)
{
	int i, ret;

	for (i = 0; i < test->nb_sockets; i++) {
		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
		u32 ctr = 0;

		while (ctr++ < SOCK_RECONF_CTR) {
			ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
						     ifobject, shared);
			if (!ret)
				break;

			/* Retry if it fails as xsk_socket__create() is asynchronous */
			if (ctr >= SOCK_RECONF_CTR)
				exit_with_error(-ret);
			usleep(USLEEP_MAX);
		}

		if (ifobject->busy_poll)
			enable_busy_poll(&ifobject->xsk_arr[i]);
	}
}

static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
	xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
	ifobject->xsk = &ifobject->xsk_arr[0];
	ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd;
	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
}
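
/* Pre-load the fill queue with up to XSK_RING_PROD__DEFAULT_NUM_DESCS
 * buffers. Normally the addresses are just consecutive frames; when
 * use_addr_for_fill is set (unaligned-mode tests), the exact packet
 * addresses from the stream are used instead.
 */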
static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
{
	u32 idx = 0, i, buffers_to_fill;
	int ret;

	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
		buffers_to_fill = umem->num_frames;
	else
		buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;

	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
	if (ret != buffers_to_fill)
		exit_with_error(ENOSPC);
	for (i = 0; i < buffers_to_fill; i++) {
		u64 addr;

		if (pkt_stream->use_addr_for_fill) {
			struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);

			if (!pkt)
				break;
			addr = pkt->addr;
		} else {
			addr = i * umem->frame_size;
		}

		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
	}
	xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
}

static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ret, ifindex;
	void *bufs;

	ifobject->ns_fd = switch_namespace(ifobject->nsname);

	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB;

	if (ifobject->shared_umem)
		umem_sz *= 2;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);

	ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);

	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);

	xsk_configure_socket(test, ifobject, ifobject->umem, false);

	ifobject->xsk = &ifobject->xsk_arr[0];

	if (!ifobject->rx_on)
		return;

	ifindex = if_nametoindex(ifobject->ifname);
	if (!ifindex)
		exit_with_error(errno);

	ret = xsk_setup_xdp_prog_xsk(ifobject->xsk->xsk, &ifobject->xsk_map_fd);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_xdp_query(ifindex, ifobject->xdp_flags, &opts);
	if (ret)
		exit_with_error(-ret);

	if (ifobject->xdp_flags & XDP_FLAGS_SKB_MODE) {
		if (opts.attach_mode != XDP_ATTACHED_SKB) {
			ksft_print_msg("ERROR: [%s] XDP prog not in SKB mode\n", __func__);
			exit_with_error(EINVAL);
		}
	} else if (ifobject->xdp_flags & XDP_FLAGS_DRV_MODE) {
		if (opts.attach_mode != XDP_ATTACHED_DRV) {
			ksft_print_msg("ERROR: [%s] XDP prog not in DRV mode\n", __func__);
			exit_with_error(EINVAL);
		}
	}

	ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
	if (ret)
		exit_with_error(errno);
}

static void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;

	if (test->current_step == 1) {
		if (!ifobject->shared_umem)
			thread_common_ops(test, ifobject);
		else
			thread_common_ops_tx(test, ifobject);
	}

	print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
		      ifobject->ifname);
	err = send_pkts(test, ifobject);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		report_failure(test);

	pthread_exit(NULL);
}

static void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	struct pollfd fds = { };
	int id = 0;
	int err;

	if (test->current_step == 1) {
		thread_common_ops(test, ifobject);
	} else {
		bpf_map_delete_elem(ifobject->xsk_map_fd, &id);
		xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
	}

	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLIN;

	pthread_barrier_wait(&barr);

	err = receive_pkts(test, &fds);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err) {
		report_failure(test);
		pthread_mutex_lock(&pacing_mutex);
		pthread_cond_signal(&pacing_cond);
		pthread_mutex_unlock(&pacing_mutex);
	}

	pthread_exit(NULL);
}

static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;

	if (ifobj->shared_umem)
		umem_sz *= 2;

	xsk_umem__delete(ifobj->umem->umem);
	munmap(ifobj->umem->buffer, umem_sz);
}

static void handler(int signum)
{
	pthread_exit(NULL);
}

static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
						  enum test_type type)
{
	bool old_shared_umem = ifobj->shared_umem;
	pthread_t t0;

	if (pthread_barrier_init(&barr, NULL, 2))
		exit_with_error(errno);

	test->current_step++;
	if (type == TEST_TYPE_POLL_RXQ_TMOUT)
		pkt_stream_reset(ifobj->pkt_stream);
	pkts_in_flight = 0;

	test->ifobj_rx->shared_umem = false;
	test->ifobj_tx->shared_umem = false;

	signal(SIGUSR1, handler);
	/* Spawn thread */
	pthread_create(&t0, NULL, ifobj->func_ptr, test);

	if (type != TEST_TYPE_POLL_TXQ_TMOUT)
		pthread_barrier_wait(&barr);

	if (pthread_barrier_destroy(&barr))
		exit_with_error(errno);

	pthread_kill(t0, SIGUSR1);
	pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		xsk_socket__delete(ifobj->xsk->xsk);
		testapp_clean_xsk_umem(ifobj);
	}

	test->ifobj_rx->shared_umem = old_shared_umem;
	test->ifobj_tx->shared_umem = old_shared_umem;

	return !!test->fail;
}

static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *ifobj_tx = test->ifobj_tx;
	struct ifobject *ifobj_rx = test->ifobj_rx;
	pthread_t t0, t1;

	if (pthread_barrier_init(&barr, NULL, 2))
		exit_with_error(errno);

	test->current_step++;
	pkt_stream_reset(ifobj_rx->pkt_stream);
	pkts_in_flight = 0;

	/* Spawn RX thread */
	pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);

	pthread_barrier_wait(&barr);
	if (pthread_barrier_destroy(&barr))
		exit_with_error(errno);

	/* Spawn TX thread */
	pthread_create(&t1, NULL, ifobj_tx->func_ptr, test);

	pthread_join(t1, NULL);
	pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		xsk_socket__delete(ifobj_tx->xsk->xsk);
		xsk_socket__delete(ifobj_rx->xsk->xsk);
		testapp_clean_xsk_umem(ifobj_rx);
		if (!ifobj_tx->shared_umem)
			testapp_clean_xsk_umem(ifobj_tx);
	}

	return !!test->fail;
}

static void testapp_teardown(struct test_spec *test)
{
	int i;

	test_spec_set_name(test, "TEARDOWN");
	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
		if (testapp_validate_traffic(test))
			return;
		test_spec_reset(test);
	}
}

static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
{
	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
	struct ifobject *tmp_ifobj = (*ifobj1);

	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
	(*ifobj2)->func_ptr = tmp_func_ptr;

	*ifobj1 = *ifobj2;
	*ifobj2 = tmp_ifobj;
}

static void testapp_bidi(struct test_spec *test)
{
	test_spec_set_name(test, "BIDIRECTIONAL");
	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return;

	print_verbose("Switching Tx/Rx vectors\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	testapp_validate_traffic(test);

	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
}

static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
{
	int ret;

	xsk_socket__delete(ifobj_tx->xsk->xsk);
	xsk_socket__delete(ifobj_rx->xsk->xsk);
	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];

	ret = xsk_socket__update_xskmap(ifobj_rx->xsk->xsk, ifobj_rx->xsk_map_fd);
	if (ret)
		exit_with_error(errno);
}

static void testapp_bpf_res(struct test_spec *test)
{
	test_spec_set_name(test, "BPF_RES");
	test->total_steps = 2;
	test->nb_sockets = 2;
	if (testapp_validate_traffic(test))
		return;

	swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
	testapp_validate_traffic(test);
}

static void testapp_headroom(struct test_spec *test)
{
	test_spec_set_name(test, "UMEM_HEADROOM");
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	testapp_validate_traffic(test);
}
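
/* Half of the packets are MIN_PKT_SIZE * 4 long while the Rx frame headroom
 * below leaves only MIN_PKT_SIZE * 3 bytes of usable buffer space (frame
 * size minus XDP_PACKET_HEADROOM minus headroom), so those packets cannot be
 * stored and are expected to show up in the rx_dropped statistic.
 */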
static void testapp_stats_rx_dropped(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_DROPPED");
	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	pkt_stream_receive_half(test);
	test->ifobj_rx->validation_func = validate_rx_dropped;
	testapp_validate_traffic(test);
}

static void testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_TX_INVALID");
	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}

static void testapp_stats_rx_full(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FULL");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, PKT_SIZE);
	if (!test->ifobj_rx->pkt_stream)
		exit_with_error(ENOMEM);

	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
	test->ifobj_rx->release_rx = false;
	test->ifobj_rx->validation_func = validate_rx_full;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}

static void testapp_stats_fill_empty(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, PKT_SIZE);
	if (!test->ifobj_rx->pkt_stream)
		exit_with_error(ENOMEM);

	test->ifobj_rx->use_fill_ring = false;
	test->ifobj_rx->validation_func = validate_fill_empty;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}

/* Simple test */
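/* Probe for 2M huge pages by attempting a MAP_HUGETLB mapping large enough
 * for a double-sized umem; unmap it again immediately if it succeeds.
 */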
static bool hugepages_present(struct ifobject *ifobject)
{
	const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
	void *bufs;

	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (bufs == MAP_FAILED)
		return false;

	munmap(bufs, mmap_sz);
	return true;
}
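
/* UNALIGNED_MODE: requires huge pages; run the umem in unaligned chunk mode
 * with half of the packets deliberately crossing buffer boundaries. Returns
 * false if the test had to be skipped.
 */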
static bool testapp_unaligned(struct test_spec *test)
{
	if (!hugepages_present(test->ifobj_tx)) {
		ksft_test_result_skip("No 2M huge pages present.\n");
		return false;
	}

	test_spec_set_name(test, "UNALIGNED_MODE");
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* Let half of the packets straddle a buffer boundary */
	pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
	test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
	return true;
}
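
/* Send a single valid packet at a fixed umem address and validate it. */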
static void testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
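
/* Feed the Tx ring a table of mostly invalid descriptors (out-of-bounds
 * addresses, oversized lengths, frames straddling page/2K/umem boundaries)
 * plus a few valid ones, and validate that only the valid packets arrive.
 * The valid/invalid expectations are adjusted below for unaligned mode,
 * 2K frame sizes and shared umem.
 */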
static void testapp_invalid_desc(struct test_spec *test)
{
	u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, PKT_SIZE, 0, true},
		/* Allowed packet */
		{0x1000, PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, PKT_SIZE, 0, false},
		/* Packet too large */
		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* After umem ends */
		{umem_size, PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a page boundary */
		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a 2K boundary */
		{0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
		/* Valid packet for sync so that something is received */
		{0x4000, PKT_SIZE, 0, true}};

	if (test->ifobj_tx->umem->unaligned_mode) {
		/* Crossing a page boundary allowed */
		pkts[6].valid = true;
	}

	if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundary not allowed */
		pkts[7].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		pkts[4].addr += umem_size;
		pkts[5].addr += umem_size;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
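
/* Fill in an ifobject's MAC/IP/port addressing (the IP strings are converted
 * with inet_aton()) and attach its worker thread entry point.
 */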
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
		       const char *dst_ip, const char *src_ip, const u16 dst_port,
		       const u16 src_port, thread_func_t func_ptr)
{
	struct in_addr ip;

	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);

	inet_aton(dst_ip, &ip);
	ifobj->dst_ip = ip.s_addr;

	inet_aton(src_ip, &ip);
	ifobj->src_ip = ip.s_addr;

	ifobj->dst_port = dst_port;
	ifobj->src_port = src_port;

	ifobj->func_ptr = func_ptr;
}
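
/* Dispatch a single test type in the given mode, then report PASS via
 * kselftest unless the test marked itself failed. Tests that cannot run in
 * the current configuration report a skip instead.
 */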
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
	switch (type) {
	case TEST_TYPE_STATS_RX_DROPPED:
		if (mode == TEST_MODE_ZC) {
			ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
			return;
		}
		testapp_stats_rx_dropped(test);
		break;
	case TEST_TYPE_STATS_TX_INVALID_DESCS:
		testapp_stats_tx_invalid_descs(test);
		break;
	case TEST_TYPE_STATS_RX_FULL:
		testapp_stats_rx_full(test);
		break;
	case TEST_TYPE_STATS_FILL_EMPTY:
		testapp_stats_fill_empty(test);
		break;
	case TEST_TYPE_TEARDOWN:
		testapp_teardown(test);
		break;
	case TEST_TYPE_BIDI:
		testapp_bidi(test);
		break;
	case TEST_TYPE_BPF_RES:
		testapp_bpf_res(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION:
		test_spec_set_name(test, "RUN_TO_COMPLETION");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
		testapp_single_pkt(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
		testapp_validate_traffic(test);
		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_RX_POLL:
		test->ifobj_rx->use_poll = true;
		test_spec_set_name(test, "POLL_RX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_TX_POLL:
		test->ifobj_tx->use_poll = true;
		test_spec_set_name(test, "POLL_TX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_POLL_TXQ_TMOUT:
		test_spec_set_name(test, "POLL_TXQ_FULL");
		test->ifobj_tx->use_poll = true;
		/* create invalid frames by setting umem frame_size and pkt length both to 2048 */
		test->ifobj_tx->umem->frame_size = 2048;
		pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
		testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_POLL_RXQ_TMOUT:
		test_spec_set_name(test, "POLL_RXQ_EMPTY");
		test->ifobj_rx->use_poll = true;
		testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC:
		if (!hugepages_present(test->ifobj_tx)) {
			ksft_test_result_skip("No 2M huge pages present.\n");
			return;
		}
		test_spec_set_name(test, "UNALIGNED_INV_DESC");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED:
		if (!testapp_unaligned(test))
			return;
		break;
	case TEST_TYPE_HEADROOM:
		testapp_headroom(test);
		break;
	default:
		break;
	}

	if (!test->fail)
		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
				      test->name);
}
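
/* Allocate an ifobject together with its socket array and umem; returns NULL
 * (after freeing any partial allocations) on failure.
 */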
static struct ifobject *ifobject_create(void)
{
	struct ifobject *ifobj;

	ifobj = calloc(1, sizeof(struct ifobject));
	if (!ifobj)
		return NULL;

	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
	if (!ifobj->xsk_arr)
		goto out_xsk_arr;

	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
	if (!ifobj->umem)
		goto out_umem;

	ifobj->ns_fd = -1;

	return ifobj;

out_umem:
	free(ifobj->xsk_arr);
out_xsk_arr:
	free(ifobj);
	return NULL;
}
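
/* Release everything ifobject_create() allocated, plus the namespace fd if
 * one was opened.
 */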
static void ifobject_delete(struct ifobject *ifobj)
{
	if (ifobj->ns_fd != -1)
		close(ifobj->ns_fd);
	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}
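
/* Check for native XDP support by loading a minimal XDP_PASS program and
 * attaching it in driver mode; both the program and the attachment are
 * removed again before returning.
 */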
static bool is_xdp_supported(struct ifobject *ifobject)
{
	int flags = XDP_FLAGS_DRV_MODE;

	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int ifindex = if_nametoindex(ifobject->ifname);
	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
	int err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0)
		return false;

	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
	if (err) {
		close(prog_fd);
		return false;
	}

	bpf_xdp_detach(ifindex, flags, NULL);
	close(prog_fd);

	return true;
}
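
/* Entry point: set up the two interface objects from the command line, detect
 * which modes the interface supports (SKB always, plus DRV and zero-copy when
 * available), then run every test type in every supported mode.
 */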
int main(int argc, char **argv)
{
	struct pkt_stream *rx_pkt_stream_default;
	struct pkt_stream *tx_pkt_stream_default;
	struct ifobject *ifobj_tx, *ifobj_rx;
	int modes = TEST_MODE_SKB + 1;
	u32 i, j, failed_tests = 0;
	struct test_spec test;
	bool shared_umem;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	ifobj_tx = ifobject_create();
	if (!ifobj_tx)
		exit_with_error(ENOMEM);
	ifobj_rx = ifobject_create();
	if (!ifobj_rx)
		exit_with_error(ENOMEM);

	setlocale(LC_ALL, "");

	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);

	shared_umem = !strcmp(ifobj_tx->ifname, ifobj_rx->ifname);
	ifobj_tx->shared_umem = shared_umem;
	ifobj_rx->shared_umem = shared_umem;

	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
		usage(basename(argv[0]));
		ksft_exit_xfail();
	}

	init_iface(ifobj_tx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
		   worker_testapp_validate_tx);
	init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
		   worker_testapp_validate_rx);

	if (is_xdp_supported(ifobj_tx)) {
		modes++;
		if (ifobj_zc_avail(ifobj_tx))
			modes++;
	}

	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
		exit_with_error(ENOMEM);
	test.tx_pkt_stream_default = tx_pkt_stream_default;
	test.rx_pkt_stream_default = rx_pkt_stream_default;

	ksft_set_plan(modes * TEST_TYPE_MAX);

	for (i = 0; i < modes; i++)
		for (j = 0; j < TEST_TYPE_MAX; j++) {
			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
			run_pkt_test(&test, i, j);
			usleep(USLEEP_MAX);

			if (test.fail)
				failed_tests++;
		}

	pkt_stream_delete(tx_pkt_stream_default);
	pkt_stream_delete(rx_pkt_stream_default);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	if (failed_tests)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}