|
@@ -1589,6 +1589,7 @@ static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
|
|
|
((_gro_ret) != GRO_DROP && (_gro_ret) != GRO_NORMAL)
|
|
|
#endif
|
|
|
|
|
|
+#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
|
|
|
/**
|
|
|
* hdd_gro_rx_bh_disable() - GRO RX/flush function.
|
|
|
* @napi_to_use: napi to be used to give packets to the stack, gro flush
|
|
@@ -1639,6 +1640,49 @@ static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
|
|
|
return status;
|
|
|
}
|
|
|
|
|
|
+#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
|
|
|
+
|
|
|
+/**
|
|
|
+ * hdd_gro_rx_bh_disable() - GRO RX/flush function.
+ * @adapter: adapter for which the rx packet is received
|
|
|
+ * @napi_to_use: napi to be used to give packets to the stack, gro flush
|
|
|
+ * @skb: pointer to sk_buff
|
|
|
+ *
|
|
|
+ * Function calls napi_gro_receive for the skb. If the skb indicates that a
|
|
|
+ * flush needs to be done (set by the lower DP layer), the function also calls
|
|
|
+ * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
|
|
|
+ * napi_gro_* calls.
|
|
|
+ *
|
|
|
+ * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
|
|
|
+ * QDF error code.
|
|
|
+ */
|
|
|
+static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
|
|
|
+ struct napi_struct *napi_to_use,
|
|
|
+ struct sk_buff *skb)
|
|
|
+{
|
|
|
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
|
|
|
+ struct hdd_context *hdd_ctx = adapter->hdd_ctx;
|
|
|
+ gro_result_t gro_ret;
|
|
|
+
|
|
|
+ skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
|
|
|
+
|
|
|
+ local_bh_disable();
|
|
|
+ gro_ret = napi_gro_receive(napi_to_use, skb);
|
|
|
+
|
|
|
+ /* At idle bus width, do an extra gro flush so aggregated frames are
+ * not held back (accounted as rx_gro_low_tput_flush).
+ */
+ if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE) {
|
|
|
+ if (HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(gro_ret)) {
|
|
|
+ adapter->hdd_stats.tx_rx_stats.rx_gro_low_tput_flush++;
|
|
|
+ dp_rx_napi_gro_flush(napi_to_use);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ local_bh_enable();
|
|
|
+
|
|
|
+ if (gro_ret == GRO_DROP)
|
|
|
+ status = QDF_STATUS_E_GRO_DROP;
|
|
|
+
|
|
|
+ return status;
|
|
|
+}
|
|
|
+#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
|
|
|
+
|
|
|
/**
|
|
|
* hdd_gro_rx_dp_thread() - Handle Rx procesing via GRO for DP thread
|
|
|
* @adapter: pointer to adapter context
|
|
@@ -2053,6 +2097,7 @@ void hdd_set_fisa_disallowed_for_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
+#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
|
|
|
/**
|
|
|
* hdd_rx_check_qdisc_for_adapter() - Check if any ingress qdisc is configured
|
|
|
* for given adapter
|
|
@@ -2127,6 +2172,7 @@ QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
|
|
|
bool skb_receive_offload_ok = false;
|
|
|
uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(skb);
|
|
|
|
|
|
+ /* rx_ctx_id is already verified for out-of-range */
|
|
|
hdd_rx_check_qdisc_for_adapter(adapter, rx_ctx_id);
|
|
|
|
|
|
if (QDF_NBUF_CB_RX_TCP_PROTO(skb) &&
|
|
@@ -2186,6 +2232,64 @@ QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
|
|
|
|
|
|
return status;
|
|
|
}
|
|
|
+
|
|
|
+#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
|
|
|
+
|
|
|
+/**
+ * hdd_rx_deliver_to_stack() - Deliver an rx skb to the network stack
+ * @adapter: adapter on which the packet was received
+ * @skb: pointer to sk_buff to deliver
+ *
+ * For TCP frames that are not peer-cached, the registered receive offload
+ * callback (hdd_ctx->receive_offload_cb) is tried first; success and
+ * GRO-drop results are counted in tx_rx_stats and returned directly.
+ * All other frames are handed to the stack with netif_receive_skb()
+ * (rx-thread or NAPI context) or netif_rx_ni() (peer-cached frames
+ * received before peer assoc).
+ *
+ * Return: QDF_STATUS_SUCCESS if the skb was accepted by the offload
+ * callback or the network stack, QDF error code otherwise.
+ */
+QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
|
|
|
+ struct sk_buff *skb)
|
|
|
+{
|
|
|
+ struct hdd_context *hdd_ctx = adapter->hdd_ctx;
|
|
|
+ int status = QDF_STATUS_E_FAILURE;
|
|
|
+ int netif_status;
|
|
|
+ bool skb_receive_offload_ok = false;
|
|
|
+
|
|
|
+ if (QDF_NBUF_CB_RX_TCP_PROTO(skb) &&
|
|
|
+ !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
|
|
|
+ skb_receive_offload_ok = true;
|
|
|
+
|
|
|
+ if (skb_receive_offload_ok && hdd_ctx->receive_offload_cb) {
|
|
|
+ status = hdd_ctx->receive_offload_cb(adapter, skb);
|
|
|
+
|
|
|
+ if (QDF_IS_STATUS_SUCCESS(status)) {
|
|
|
+ adapter->hdd_stats.tx_rx_stats.rx_aggregated++;
|
|
|
+ return status;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (status == QDF_STATUS_E_GRO_DROP) {
|
|
|
+ adapter->hdd_stats.tx_rx_stats.rx_gro_dropped++;
|
|
|
+ return status;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ adapter->hdd_stats.tx_rx_stats.rx_non_aggregated++;
|
|
|
+
|
|
|
+ /* Account for GRO/LRO ineligible packets, mostly UDP */
|
|
|
+ hdd_ctx->no_rx_offload_pkt_cnt++;
|
|
|
+
|
|
|
+ if (qdf_likely(hdd_ctx->enable_dp_rx_threads ||
|
|
|
+ hdd_ctx->enable_rxthread)) {
|
|
|
+ local_bh_disable();
|
|
|
+ netif_status = netif_receive_skb(skb);
|
|
|
+ local_bh_enable();
|
|
|
+ } else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))) {
|
|
|
+ /*
|
|
|
+ * Frames before peer is registered to avoid contention with
|
|
|
+ * NAPI softirq.
|
|
|
+ * Refer fix:
|
|
|
+ * qcacld-3.0: Do netif_rx_ni() for frames received before
|
|
|
+ * peer assoc
|
|
|
+ */
|
|
|
+ netif_status = netif_rx_ni(skb);
|
|
|
+ } else { /* NAPI Context */
|
|
|
+ netif_status = netif_receive_skb(skb);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (netif_status == NET_RX_SUCCESS)
|
|
|
+ status = QDF_STATUS_SUCCESS;
|
|
|
+
|
|
|
+ return status;
|
|
|
+}
|
|
|
+#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
|
|
|
#endif
|
|
|
|
|
|
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
|