// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef100_rx.h"
#include "rx_common.h"
#include "efx.h"
#include "nic_common.h"
#include "mcdi_functions.h"
#include "ef100_regs.h"
#include "ef100_nic.h"
#include "io.h"

/* Get the value of a field in the RX prefix */
#define PREFIX_OFFSET_W(_f)     (ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
#define PREFIX_OFFSET_B(_f)     (ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
#define PREFIX_WIDTH_MASK(_f)   ((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
#define PREFIX_WORD(_p, _f)     le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
#define PREFIX_FIELD(_p, _f)    ((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
                                 PREFIX_WIDTH_MASK(_f))
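
/* The NT_OR_INNER_L3_CLASS bit position is defined relative to the CLASS
 * field, so flatten it into a prefix-relative definition that PREFIX_FIELD()
 * can use directly.
 */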
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN \
        (ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
        ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
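
/* Report whether the RX prefix carries a valid RSS hash for this packet */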
bool ef100_rx_buf_hash_valid(const u8 *prefix)
{
        return PREFIX_FIELD(prefix, RSS_HASH_VALID);
}
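
/* Return true if the L2 status in the RX prefix reports any error, bumping
 * the CRC error counter when that error is an FCS failure.
 */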
static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
{
        u16 rxclass;
        u8 l2status;

        rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
        l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);

        if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
                /* Everything is ok */
                return false;

        if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
                channel->n_rx_eth_crc_err++;
        return true;
}
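
/* Deliver the packet noted by ef100_rx_packet(): check the FCS status and
 * length from the RX prefix, steer packets arriving from a non-base m-port
 * to their representor (under CONFIG_SFC_SRIOV), pick up the frame checksum
 * when RX checksum offload is enabled, then hand the buffer to GRO.
 */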
void __ef100_rx_packet(struct efx_channel *channel)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
                                                     channel->rx_pkt_index);
        struct efx_nic *efx = channel->efx;
        struct ef100_nic_data *nic_data;
        u8 *eh = efx_rx_buf_va(rx_buf);
        __wsum csum = 0;
        u16 ing_port;
        u32 *prefix;

        prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);

        if (ef100_has_fcs_error(channel, prefix) &&
            unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
                goto out;

        rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
        if (rx_buf->len <= sizeof(struct ethhdr)) {
                if (net_ratelimit())
                        netif_err(channel->efx, rx_err, channel->efx->net_dev,
                                  "RX packet too small (%d)\n", rx_buf->len);
                ++channel->n_rx_frm_trunc;
                goto out;
        }

        ing_port = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, INGRESS_MPORT));

        nic_data = efx->nic_data;

        if (nic_data->have_mport && ing_port != nic_data->base_mport) {
#ifdef CONFIG_SFC_SRIOV
                struct efx_rep *efv;

                rcu_read_lock();
                efv = efx_ef100_find_rep_by_mport(efx, ing_port);
                if (efv) {
                        if (efv->net_dev->flags & IFF_UP)
                                efx_ef100_rep_rx_packet(efv, rx_buf);
                        rcu_read_unlock();
                        /* Representor Rx doesn't care about PF Rx buffer
                         * ownership, it just makes a copy. So, we are done
                         * with the Rx buffer from PF point of view and should
                         * free it.
                         */
                        goto free_rx_buffer;
                }
                rcu_read_unlock();
#endif
                if (net_ratelimit())
                        netif_warn(efx, drv, efx->net_dev,
                                   "Unrecognised ing_port %04x (base %04x), dropping\n",
                                   ing_port, nic_data->base_mport);
                channel->n_rx_mport_bad++;
                goto free_rx_buffer;
        }

        if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
                if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
                        ++channel->n_rx_ip_hdr_chksum_err;
                } else {
                        u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));

                        csum = (__force __wsum) sum;
                }
        }

        if (channel->type->receive_skb) {
                /* no support for special channels yet, so just discard */
                WARN_ON_ONCE(1);
                goto free_rx_buffer;
        }

        efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
        goto out;

free_rx_buffer:
        efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
        channel->rx_pkt_n_frags = 0;
}
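
/* Prepare a completed RX buffer: sync it for the CPU, step past the RX
 * prefix, recycle the page, flush any packet already held on the channel,
 * and record this one for delivery by __ef100_rx_packet().
 */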
static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
{
        struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_nic *efx = rx_queue->efx;

        ++rx_queue->rx_packets;

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received id %x\n",
                   efx_rx_queue_index(rx_queue), index);

        efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);

        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->rx_prefix_size;

        efx_recycle_rx_pages(channel, rx_buf, 1);

        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = 1;
        channel->rx_pkt_index = index;
}
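
/* Handle an RX event, which may cover several merged packet completions;
 * each packet is pulled from the ring in order via ef100_rx_packet().
 */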
void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        unsigned int n_packets =
                EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
        int i;

        WARN_ON_ONCE(!n_packets);
        if (n_packets > 1)
                ++channel->n_rx_merge_events;

        channel->irq_mod_score += 2 * n_packets;

        for (i = 0; i < n_packets; ++i) {
                ef100_rx_packet(rx_queue,
                                rx_queue->removed_count & rx_queue->ptr_mask);
                ++rx_queue->removed_count;
        }
}
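
/* Push newly added RX buffers to the hardware: write a descriptor for each
 * buffer between notified_count and added_count, then ring the RX ring
 * doorbell with the new producer index.
 */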
void ef100_rx_write(struct efx_rx_queue *rx_queue)
{
        struct efx_rx_buffer *rx_buf;
        unsigned int idx;
        efx_qword_t *rxd;
        efx_dword_t rxdb;

        while (rx_queue->notified_count != rx_queue->added_count) {
                idx = rx_queue->notified_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, idx);
                rxd = efx_rx_desc(rx_queue, idx);

                EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);

                ++rx_queue->notified_count;
        }

        wmb();
        EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
                             rx_queue->added_count & rx_queue->ptr_mask);
        efx_writed_page(rx_queue->efx, &rxdb,
                        ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
}