// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <[email protected]>
 *	      Magnus Karlsson <[email protected]>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
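
/* Illustrative sketch (not part of this file): how user space is expected to
 * consume the need_wakeup flags set above. Assuming the socket was bound with
 * XDP_USE_NEED_WAKEUP and the TX ring's flags word was mapped at the offset
 * reported by XDP_MMAP_OFFSETS (the "tx_flags" pointer below is such an
 * assumption), the application only issues a kick syscall when the kernel has
 * asked for one:
 *
 *	if (*tx_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * The same check against the fill ring's flags word decides whether poll() or
 * recvmsg() is needed to wake up the RX side.
 */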
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOMEM;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
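
/* Illustrative sketch (not part of this file): __xsk_map_redirect() and
 * __xsk_map_flush() are reached when an XDP program redirects a frame into a
 * BPF_MAP_TYPE_XSKMAP. A minimal such program, with the map and program names
 * chosen here only for the example, could look like:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xsk_redirect_prog(struct xdp_md *ctx)
 *	{
 *		// Fall back to XDP_PASS if no socket is attached to this
 *		// RX queue index.
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
 *					XDP_PASS);
 *	}
 */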
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			goto out;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}
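
/* Illustrative sketch (not part of this file): from user space, the copy-mode
 * and zero-copy TX paths above are both driven by an "empty" send on the
 * socket once descriptors have been placed in the TX ring. The ring pointers
 * and frame variables below are assumed to exist in the application:
 *
 *	// fill one TX descriptor, then publish the new producer index
 *	tx_ring[prod & (NUM_DESCS - 1)] =
 *		(struct xdp_desc){ .addr = frame_addr, .len = frame_len };
 *	__atomic_store_n(tx_producer, ++prod, __ATOMIC_RELEASE);
 *
 *	// kick the kernel; EAGAIN/EBUSY simply mean "retry later"
 *	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */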
static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	rcu_read_unlock();
	return mask;
}
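
/* Illustrative sketch (not part of this file): poll() is the one call that
 * also drives TX in copy mode, so a simple event loop over an AF_XDP socket
 * (xsk_fd and timeout_ms are application variables assumed here) can be:
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// drain the RX ring
 *		if (pfd.revents & POLLOUT)
 *			;	// refill the TX ring
 *	}
 */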
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
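
/* Illustrative sketch (not part of this file): the flag checks above map
 * directly onto the user-space bind() call. A socket with its own umem binds
 * like this (ifindex and queue_id are assumed application variables):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family   = AF_XDP,
 *		.sxdp_ifindex  = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags    = XDP_USE_NEED_WAKEUP,	// optionally XDP_COPY
 *							// or XDP_ZEROCOPY
 *	};
 *
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * A second socket sharing the umem instead sets XDP_SHARED_UMEM as the only
 * flag and points sxdp_shared_umem_fd at the already-bound socket.
 */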
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
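
/* Illustrative sketch (not part of this file): the setsockopt() cases above
 * are exercised by user space roughly in this order when setting up a socket.
 * CHUNK_SIZE, NUM_CHUNKS and umem_area (a page-aligned buffer of
 * NUM_CHUNKS * CHUNK_SIZE bytes) are assumptions of the example:
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	int entries = 2048;	// must be a non-zero power of two
 *
 *	struct xdp_umem_reg mr = {
 *		.addr       = (__u64)(uintptr_t)umem_area,
 *		.len        = (__u64)NUM_CHUNKS * CHUNK_SIZE,
 *		.chunk_size = CHUNK_SIZE,
 *		.headroom   = 0,
 *	};
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 */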
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
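
/* Illustrative sketch (not part of this file): the page offsets handled above
 * are the constants user space passes to mmap() after querying the ring
 * layout with XDP_MMAP_OFFSETS. NUM_DESCS matches the entries value given to
 * XDP_RX_RING in this assumed example:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *
 *	void *rx_map = mmap(NULL,
 *			    off.rx.desc + NUM_DESCS * sizeof(struct xdp_desc),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    fd, XDP_PGOFF_RX_RING);
 *
 *	__u32 *rx_producer = rx_map + off.rx.producer;
 *	__u32 *rx_consumer = rx_map + off.rx.consumer;
 *	struct xdp_desc *rx_ring = rx_map + off.rx.desc;
 *
 * The TX, fill and completion rings are mapped the same way with
 * XDP_PGOFF_TX_RING, XDP_UMEM_PGOFF_FILL_RING and
 * XDP_UMEM_PGOFF_COMPLETION_RING, the latter two carrying __u64 addresses
 * instead of struct xdp_desc entries.
 */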
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);