rx_common.c

// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_page_state *state;
        unsigned int index;
        struct page *page;

        if (unlikely(!rx_queue->page_ring))
                return NULL;
        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used
 * in the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
                                struct efx_rx_buffer *rx_buf)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_nic *efx = rx_queue->efx;
        struct page *page = rx_buf->page;
        unsigned int index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned int read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        efx_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
                          struct efx_rx_buffer *rx_buf,
                          unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        if (unlikely(!rx_queue->page_ring))
                return;

        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;
        struct efx_nic *efx = rx_queue->efx;

        bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        if (!rx_queue->page_ring)
                rx_queue->page_ptr_mask = 0;
        else
                rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        int i;

        if (unlikely(!rx_queue->page_ring))
                return;

        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct efx_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
                efx_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
}

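/* Allocate the software state for an RX queue: size the descriptor ring to
 * the smallest power of two that holds efx->rxq_entries, allocate the
 * rx_buffer array, and ask the NIC-type code to allocate its hardware
 * resources.
 */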
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

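/* Initialise an RX queue before use: reset the ring pointers and the page
 * recycle ring, work out the refill trigger level from rx_refill_threshold,
 * register XDP queue information and set up the hardware descriptor ring.
 */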
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        unsigned int max_fill, trigger, max_trigger;
        struct efx_nic *efx = rx_queue->efx;
        int rc = 0;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        efx_init_rx_recycle_ring(rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Initialise XDP queue information */
        rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
                              rx_queue->core_index, 0);
        if (rc) {
                netif_err(efx, rx_err, efx->net_dev,
                          "Failure to initialise XDP queue information rc=%d\n",
                          rc);
                efx->xdp_rxq_info_failed = true;
        } else {
                rx_queue->xdp_rxq_info_valid = true;
        }

        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
}

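/* Tear down an RX queue: stop the slow-fill timer, release any buffers still
 * on the ring, free the page recycle ring and unregister the XDP queue
 * information.
 */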
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_rx_buffer *rx_buf;
        int i;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned int index = i & rx_queue->ptr_mask;

                        rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        efx_fini_rx_recycle_ring(rx_queue);

        if (rx_queue->xdp_rxq_info_valid)
                xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

        rx_queue->xdp_rxq_info_valid = false;
}

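/* Free the hardware and software state allocated by efx_probe_rx_queue(). */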
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
                         struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct efx_rx_page_state *state = page_address(page);

                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

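/* Drop the page references held by @num_bufs consecutive RX buffers starting
 * at @rx_buf.  The DMA mapping itself is not touched here; that is handled by
 * efx_unmap_rx_buffer() for the final buffer in each page.
 */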
void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
                         struct efx_rx_buffer *rx_buf,
                         unsigned int num_bufs)
{
        do {
                if (rx_buf->page) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                }
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
        struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
        mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
        unsigned int page_offset, index, count;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_page_state *state;
        struct efx_rx_buffer *rx_buf;
        dma_addr_t dma_addr;
        struct page *page;

        count = 0;
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COMP |
                                           (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
                                           EFX_XDP_HEADROOM;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + efx->rx_ip_align +
                                              EFX_XDP_HEADROOM;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}

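/* Work out how receive pages are divided into buffers: each page begins with
 * a struct efx_rx_page_state, followed by rx_bufs_per_page buffers of
 * rx_page_buf_step bytes each (DMA data plus IP alignment and XDP
 * head/tailroom).
 */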
void efx_rx_config_page_split(struct efx_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
                                      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh, __wsum csum)
{
        struct napi_struct *napi = &channel->napi_str;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                struct efx_rx_queue *rx_queue;

                rx_queue = efx_channel_get_rx_queue(channel);
                efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH &&
            efx_rx_buf_hash_valid(efx, eh))
                skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
                             PKT_HASH_TYPE_L3);
        if (csum) {
                skb->csum = csum;
                skb->ip_summed = CHECKSUM_COMPLETE;
        } else {
                skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                                  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
        }
        skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        napi_gro_frags(napi);
}

/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
        struct list_head *head = &efx->rss_context.list;
        struct efx_rss_context *ctx, *new;
        u32 id = 1; /* Don't use zero, that refers to the master RSS context */

        WARN_ON(!mutex_is_locked(&efx->rss_lock));

        /* Search for first gap in the numbering */
        list_for_each_entry(ctx, head, list) {
                if (ctx->user_id != id)
                        break;
                id++;
                /* Check for wrap. If this happens, we have nearly 2^32
                 * allocated RSS contexts, which seems unlikely.
                 */
                if (WARN_ON_ONCE(!id))
                        return NULL;
        }

        /* Create the new entry */
        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;
        new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
        new->rx_hash_udp_4tuple = false;

        /* Insert the new entry into the gap */
        new->user_id = id;
        list_add_tail(&new->list, &ctx->list);
        return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
        struct list_head *head = &efx->rss_context.list;
        struct efx_rss_context *ctx;

        WARN_ON(!mutex_is_locked(&efx->rss_lock));

        list_for_each_entry(ctx, head, list)
                if (ctx->user_id == id)
                        return ctx;
        return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
        list_del(&ctx->list);
        kfree(ctx);
}

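/* Fill the RSS indirection table with the default even spread across
 * rss_spread queues, as computed by ethtool_rxfh_indir_default().
 */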
void efx_set_default_rx_indir_table(struct efx_nic *efx,
                                    struct efx_rss_context *ctx)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
                ctx->rx_indir_table[i] =
                        ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
        if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
            spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
                return false;

        if (spec->match_flags &
            (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
            is_multicast_ether_addr(spec->loc_mac))
                return true;

        if ((spec->match_flags &
             (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
            (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
                if (spec->ether_type == htons(ETH_P_IP) &&
                    ipv4_is_multicast(spec->loc_host[0]))
                        return true;
                if (spec->ether_type == htons(ETH_P_IPV6) &&
                    ((const u8 *)spec->loc_host)[0] == 0xff)
                        return true;
        }
        return false;
}

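/* Two filter specs are considered equal if their match_flags and RX/TX
 * direction flags match and the fields from vport_id to the end of the
 * structure are identical.  efx_filter_spec_hash() below hashes that same
 * vport_id-onwards region, so the two must stay in step.
 */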
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
                           const struct efx_filter_spec *right)
{
        if ((left->match_flags ^ right->match_flags) |
            ((left->flags ^ right->flags) &
             (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
                return false;

        return memcmp(&left->vport_id, &right->vport_id,
                      sizeof(struct efx_filter_spec) -
                      offsetof(struct efx_filter_spec, vport_id)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
        BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
        return jhash2((const u32 *)&spec->vport_id,
                      (sizeof(struct efx_filter_spec) -
                       offsetof(struct efx_filter_spec, vport_id)) / 4,
                      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
                        bool *force)
{
        if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
                /* ARFS is currently updating this entry, leave it */
                return false;
        }
        if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
                /* ARFS tried and failed to update this, so it's probably out
                 * of date. Remove the filter and the ARFS rule entry.
                 */
                rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
                *force = true;
                return true;
        } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
                /* ARFS has moved on, so old filter is not needed. Since we did
                 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
                 * not be removed by efx_rps_hash_del() subsequently.
                 */
                *force = true;
                return true;
        }
        /* Remove it iff ARFS wants to. */
        return true;
}

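/* Look up the ARFS hash table bucket for a filter spec.  Returns NULL if the
 * table has not been allocated; the caller must hold rps_hash_lock.
 */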
static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec)
{
        u32 hash = efx_filter_spec_hash(spec);

        lockdep_assert_held(&efx->rps_hash_lock);
        if (!efx->rps_hash_table)
                return NULL;
        return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
                                        const struct efx_filter_spec *spec)
{
        struct efx_arfs_rule *rule;
        struct hlist_head *head;
        struct hlist_node *node;

        head = efx_rps_hash_bucket(efx, spec);
        if (!head)
                return NULL;
        hlist_for_each(node, head) {
                rule = container_of(node, struct efx_arfs_rule, node);
                if (efx_filter_spec_equal(spec, &rule->spec))
                        return rule;
        }
        return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec,
                                       bool *new)
{
        struct efx_arfs_rule *rule;
        struct hlist_head *head;
        struct hlist_node *node;

        head = efx_rps_hash_bucket(efx, spec);
        if (!head)
                return NULL;
        hlist_for_each(node, head) {
                rule = container_of(node, struct efx_arfs_rule, node);
                if (efx_filter_spec_equal(spec, &rule->spec)) {
                        *new = false;
                        return rule;
                }
        }
        rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
        *new = true;
        if (rule) {
                memcpy(&rule->spec, spec, sizeof(rule->spec));
                hlist_add_head(&rule->node, head);
        }
        return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
        struct efx_arfs_rule *rule;
        struct hlist_head *head;
        struct hlist_node *node;

        head = efx_rps_hash_bucket(efx, spec);
        if (WARN_ON(!head))
                return;
        hlist_for_each(node, head) {
                rule = container_of(node, struct efx_arfs_rule, node);
                if (efx_filter_spec_equal(spec, &rule->spec)) {
                        /* Someone already reused the entry. We know that if
                         * this check doesn't fire (i.e. filter_id == REMOVING)
                         * then the REMOVING mark was put there by our caller,
                         * because caller is holding a lock on filter table and
                         * only holders of that lock set REMOVING.
                         */
                        if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
                                return;
                        hlist_del(node);
                        kfree(rule);
                        return;
                }
        }
        /* We didn't find it. */
        WARN_ON(1);
}
#endif

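/* Create the hardware filter table and, if the NIC supports NTUPLE offload,
 * the per-channel RPS flow ID arrays used for accelerated RFS.
 */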
int efx_probe_filters(struct efx_nic *efx)
{
        int rc;

        mutex_lock(&efx->mac_lock);
        rc = efx->type->filter_table_probe(efx);
        if (rc)
                goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
        if (efx->type->offload_features & NETIF_F_NTUPLE) {
                struct efx_channel *channel;
                int i, success = 1;

                efx_for_each_channel(channel, efx) {
                        channel->rps_flow_id =
                                kcalloc(efx->type->max_rx_ip_filters,
                                        sizeof(*channel->rps_flow_id),
                                        GFP_KERNEL);
                        if (!channel->rps_flow_id)
                                success = 0;
                        else
                                for (i = 0;
                                     i < efx->type->max_rx_ip_filters;
                                     ++i)
                                        channel->rps_flow_id[i] =
                                                RPS_FLOW_ID_INVALID;
                        channel->rfs_expire_index = 0;
                        channel->rfs_filter_count = 0;
                }

                if (!success) {
                        efx_for_each_channel(channel, efx)
                                kfree(channel->rps_flow_id);
                        efx->type->filter_table_remove(efx);
                        rc = -ENOMEM;
                        goto out_unlock;
                }
        }
#endif
out_unlock:
        mutex_unlock(&efx->mac_lock);
        return rc;
}

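/* Undo efx_probe_filters(): cancel outstanding filter work and free the
 * per-channel RPS flow ID arrays before removing the hardware filter table.
 */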
void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx) {
                cancel_delayed_work_sync(&channel->filter_work);
                kfree(channel->rps_flow_id);
                channel->rps_flow_id = NULL;
        }
#endif
        efx->type->filter_table_remove(efx);
}

#ifdef CONFIG_RFS_ACCEL

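/* Deferred work for accelerated RFS: actually insert the filter requested by
 * efx_filter_rfs() below, record the resulting filter ID against the flow so
 * that it can be expired later, and release the request slot.
 */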
static void efx_filter_rfs_work(struct work_struct *data)
{
        struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
                                                               work);
        struct efx_nic *efx = efx_netdev_priv(req->net_dev);
        struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
        int slot_idx = req - efx->rps_slot;
        struct efx_arfs_rule *rule;
        u16 arfs_id = 0;
        int rc;

        rc = efx->type->filter_insert(efx, &req->spec, true);
        if (rc >= 0)
                /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
                rc %= efx->type->max_rx_ip_filters;
        if (efx->rps_hash_table) {
                spin_lock_bh(&efx->rps_hash_lock);
                rule = efx_rps_hash_find(efx, &req->spec);
                /* The rule might have already gone, if someone else's request
                 * for the same spec was already worked and then expired before
                 * we got around to our work. In that case we have nothing
                 * tying us to an arfs_id, meaning that as soon as the filter
                 * is considered for expiry it will be removed.
                 */
                if (rule) {
                        if (rc < 0)
                                rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
                        else
                                rule->filter_id = rc;
                        arfs_id = rule->arfs_id;
                }
                spin_unlock_bh(&efx->rps_hash_lock);
        }
        if (rc >= 0) {
                /* Remember this so we can check whether to expire the filter
                 * later.
                 */
                mutex_lock(&efx->rps_mutex);
                if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
                        channel->rfs_filter_count++;
                channel->rps_flow_id[rc] = req->flow_id;
                mutex_unlock(&efx->rps_mutex);

                if (req->spec.ether_type == htons(ETH_P_IP))
                        netif_info(efx, rx_status, efx->net_dev,
                                   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
                                   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                                   req->spec.rem_host, ntohs(req->spec.rem_port),
                                   req->spec.loc_host, ntohs(req->spec.loc_port),
                                   req->rxq_index, req->flow_id, rc, arfs_id);
                else
                        netif_info(efx, rx_status, efx->net_dev,
                                   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
                                   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                                   req->spec.rem_host, ntohs(req->spec.rem_port),
                                   req->spec.loc_host, ntohs(req->spec.loc_port),
                                   req->rxq_index, req->flow_id, rc, arfs_id);
                channel->n_rfs_succeeded++;
        } else {
                if (req->spec.ether_type == htons(ETH_P_IP))
                        netif_dbg(efx, rx_status, efx->net_dev,
                                  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
                                  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                                  req->spec.rem_host, ntohs(req->spec.rem_port),
                                  req->spec.loc_host, ntohs(req->spec.loc_port),
                                  req->rxq_index, req->flow_id, rc, arfs_id);
                else
                        netif_dbg(efx, rx_status, efx->net_dev,
                                  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
                                  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                                  req->spec.rem_host, ntohs(req->spec.rem_port),
                                  req->spec.loc_host, ntohs(req->spec.loc_port),
                                  req->rxq_index, req->flow_id, rc, arfs_id);
                channel->n_rfs_failed++;
                /* We're overloading the NIC's filter tables, so let's do a
                 * chunk of extra expiry work.
                 */
                __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
                                                     100u));
        }

        /* Release references */
        clear_bit(slot_idx, &efx->rps_slot_map);
        dev_put(req->net_dev);
}

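/* Accelerated RFS entry point (ndo_rx_flow_steer).  Dissect the flow, build a
 * filter spec steering it to @rxq_index, and hand the actual insertion off to
 * a work item (efx_filter_rfs_work), since this callback runs in atomic
 * context.
 */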
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_async_filter_insertion *req;
        struct efx_arfs_rule *rule;
        struct flow_keys fk;
        int slot_idx;
        bool new;
        int rc;

        /* find a free slot */
        for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
                if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
                        break;
        if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
                return -EBUSY;

        if (flow_id == RPS_FLOW_ID_INVALID) {
                rc = -EINVAL;
                goto out_clear;
        }

        if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
                rc = -EPROTONOSUPPORT;
                goto out_clear;
        }

        if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
                rc = -EPROTONOSUPPORT;
                goto out_clear;
        }
        if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
                rc = -EPROTONOSUPPORT;
                goto out_clear;
        }

        req = efx->rps_slot + slot_idx;
        efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        req->spec.match_flags =
                EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
                EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
                EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
        req->spec.ether_type = fk.basic.n_proto;
        req->spec.ip_proto = fk.basic.ip_proto;

        if (fk.basic.n_proto == htons(ETH_P_IP)) {
                req->spec.rem_host[0] = fk.addrs.v4addrs.src;
                req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
        } else {
                memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
                       sizeof(struct in6_addr));
                memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
                       sizeof(struct in6_addr));
        }

        req->spec.rem_port = fk.ports.src;
        req->spec.loc_port = fk.ports.dst;

        if (efx->rps_hash_table) {
                /* Add it to ARFS hash table */
                spin_lock(&efx->rps_hash_lock);
                rule = efx_rps_hash_add(efx, &req->spec, &new);
                if (!rule) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                if (new)
                        rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
                rc = rule->arfs_id;
                /* Skip if existing or pending filter already does the right thing */
                if (!new && rule->rxq_index == rxq_index &&
                    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
                        goto out_unlock;
                rule->rxq_index = rxq_index;
                rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
                spin_unlock(&efx->rps_hash_lock);
        } else {
                /* Without an ARFS hash table, we just use arfs_id 0 for all
                 * filters. This means if multiple flows hash to the same
                 * flow_id, all but the most recently touched will be eligible
                 * for expiry.
                 */
                rc = 0;
        }

        /* Queue the request */
        dev_hold(req->net_dev = net_dev);
        INIT_WORK(&req->work, efx_filter_rfs_work);
        req->rxq_index = rxq_index;
        req->flow_id = flow_id;
        schedule_work(&req->work);
        return rc;
out_unlock:
        spin_unlock(&efx->rps_hash_lock);
out_clear:
        clear_bit(slot_idx, &efx->rps_slot_map);
        return rc;
}

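/* Scan up to @quota entries of the channel's RPS flow table, asking the NIC
 * type to expire filters that ARFS no longer needs.  Returns false if the
 * scan could not run because rps_mutex was contended.
 */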
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
        struct efx_nic *efx = channel->efx;
        unsigned int index, size, start;
        u32 flow_id;

        if (!mutex_trylock(&efx->rps_mutex))
                return false;
        expire_one = efx->type->filter_rfs_expire_one;
        index = channel->rfs_expire_index;
        start = index;
        size = efx->type->max_rx_ip_filters;
        while (quota) {
                flow_id = channel->rps_flow_id[index];
                if (flow_id != RPS_FLOW_ID_INVALID) {
                        quota--;
                        if (expire_one(efx, flow_id, index)) {
                                netif_info(efx, rx_status, efx->net_dev,
                                           "expired filter %d [channel %u flow %u]\n",
                                           index, channel->channel, flow_id);
                                channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
                                channel->rfs_filter_count--;
                        }
                }
                if (++index == size)
                        index = 0;
                /* If we were called with a quota that exceeds the total number
                 * of filters in the table (which shouldn't happen, but could
                 * if two callers race), ensure that we don't loop forever -
                 * stop when we've examined every row of the table.
                 */
                if (index == start)
                        break;
        }

        channel->rfs_expire_index = index;
        mutex_unlock(&efx->rps_mutex);
        return true;
}

#endif /* CONFIG_RFS_ACCEL */