qcacmn: Update mlo ctx stats API with xmit type
Update the MLO ctx stats API to take an xmit type. MLO ctx ingress stats
are now stored per legacy/MLO xmit type. Also update the stats-incrementing
mechanism on peer unmap.

Change-Id: I2f16b95e64e6a7697d18d9453e9f7c60631f80bb
CRs-Fixed: 3561679
committed by Rahul Choudhary
parent 15edfd0a13
commit 3694cff9dc
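Note (reviewer illustration, not part of the patch): the new DP_UPDATE_MLD_VDEV_INGRESS_STATS macro below keeps ingress (tx_i) counters per xmit type and folds a source object into a target over an index range chosen by the _xmit_type argument (DP_XMIT_MLD selects only the MLD slot, DP_XMIT_TOTAL walks every slot). The stand-alone C sketch below mirrors only that index-selection idea; every type, field, and function name in it (xmit_type, ingress_stats, aggregate_ingress, ...) is hypothetical and is not qcacmn code.

/*
 * Illustrative sketch of xmit-type-indexed ingress aggregation.
 * All names here are hypothetical; only the index-selection logic mirrors
 * the DP_UPDATE_MLD_VDEV_INGRESS_STATS hunk in this commit.
 */
#include <stdint.h>
#include <stdio.h>

enum xmit_type { XMIT_LINK = 0, XMIT_MLD = 1, XMIT_TYPE_MAX, XMIT_TOTAL };

struct ingress_slot {
	uint64_t rcvd_pkts;
	uint64_t rcvd_bytes;
};

struct ingress_stats {
	struct ingress_slot tx_i[XMIT_TYPE_MAX];	/* one slot per xmit type */
};

/* Fold src into tgt for the slot range implied by xmit_type:
 * XMIT_MLD   -> only the MLD slot,
 * XMIT_TOTAL -> every slot (link and MLD),
 * XMIT_LINK  -> only the link slot.
 */
static void aggregate_ingress(struct ingress_stats *tgt,
			      const struct ingress_stats *src,
			      enum xmit_type xmit_type)
{
	int idx = 0;
	int last = xmit_type;

	if (xmit_type == XMIT_MLD) {
		idx = XMIT_MLD;
		last = XMIT_MLD;
	} else if (xmit_type == XMIT_TOTAL) {
		last = XMIT_TYPE_MAX - 1;
	}

	for (; idx <= last; idx++) {
		tgt->tx_i[idx].rcvd_pkts += src->tx_i[idx].rcvd_pkts;
		tgt->tx_i[idx].rcvd_bytes += src->tx_i[idx].rcvd_bytes;
	}
}

int main(void)
{
	struct ingress_stats mlo_ctx = { .tx_i = {
		[XMIT_LINK] = { .rcvd_pkts = 10, .rcvd_bytes = 1000 },
		[XMIT_MLD]  = { .rcvd_pkts = 5,  .rcvd_bytes = 500 },
	} };
	struct ingress_stats vdev = { 0 };

	/* Aggregate both the link and the MLD slots into the vdev copy. */
	aggregate_ingress(&vdev, &mlo_ctx, XMIT_TOTAL);
	printf("link pkts %llu, mld pkts %llu\n",
	       (unsigned long long)vdev.tx_i[XMIT_LINK].rcvd_pkts,
	       (unsigned long long)vdev.tx_i[XMIT_MLD].rcvd_pkts);
	return 0;
}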
@@ -2005,11 +2005,13 @@ enum cdp_stats {
  * @UPDATE_PEER_STATS: update peer stats
  * @UPDATE_VDEV_STATS: update vdev stats
  * @UPDATE_PDEV_STATS: Update pdev stats
+ * @UPDATE_VDEV_STATS_MLD: Update mld vdev stats
  */
 enum cdp_stat_update_type {
 	UPDATE_PEER_STATS = 0,
 	UPDATE_VDEV_STATS = 1,
 	UPDATE_PDEV_STATS = 2,
+	UPDATE_VDEV_STATS_MLD = 3,
 };
 
 /**
@@ -2772,15 +2772,50 @@ static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
 	}
 }
 
-static
-void dp_get_vdev_stats_for_unmap_peer_be(struct dp_vdev *vdev,
-					 struct dp_peer *peer,
-					 struct cdp_vdev_stats **vdev_stats)
+static void dp_get_vdev_stats_for_unmap_peer_mlo(struct dp_vdev *vdev,
+						 struct dp_peer *peer)
 {
 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
+	struct cdp_vdev_stats *vdev_stats = &be_vdev->mlo_stats;
+	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
+	struct dp_pdev *pdev = vdev->pdev;
+	struct dp_soc *soc = vdev->pdev->soc;
+	uint8_t link_id = dp_get_peer_hw_link_id(soc, pdev);
+	struct dp_peer_per_pkt_stats *per_pkt_stats;
 
-	if (!IS_DP_LEGACY_PEER(peer))
-		*vdev_stats = &be_vdev->mlo_stats;
+	if (!txrx_peer)
+		goto link_stats;
+
+	dp_peer_aggregate_tid_stats(peer);
+
+	if (!IS_MLO_DP_LINK_PEER(peer)) {
+		per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
+		dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
+		DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
+	}
+
+	if (IS_MLO_DP_LINK_PEER(peer)) {
+		link_id = dp_get_peer_hw_link_id(soc, pdev);
+		if (link_id > 0) {
+			per_pkt_stats =
+				&txrx_peer->stats[link_id].per_pkt_stats;
+			DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
+		}
+	}
+
+link_stats:
+	dp_monitor_peer_get_stats(soc, peer, vdev_stats, UPDATE_VDEV_STATS_MLD);
+}
+
+static
+void dp_get_vdev_stats_for_unmap_peer_be(struct dp_vdev *vdev,
+					 struct dp_peer *peer)
+{
+
+	if (IS_DP_LEGACY_PEER(peer))
+		dp_get_vdev_stats_for_unmap_peer_legacy(vdev, peer);
+	else
+		dp_get_vdev_stats_for_unmap_peer_mlo(vdev, peer);
 }
 #else
 static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
@@ -2878,8 +2913,7 @@ QDF_STATUS dp_txrx_get_vdev_mcast_param_be(struct dp_soc *soc,
 
 static
 void dp_get_vdev_stats_for_unmap_peer_be(struct dp_vdev *vdev,
-					 struct dp_peer *peer,
-					 struct cdp_vdev_stats **vdev_stats)
+					 struct dp_peer *peer)
 {
 }
 #endif
@@ -3295,10 +3329,10 @@ QDF_STATUS dp_mlo_dev_ctxt_vdev_detach(struct cdp_soc_t *soc_hdl,
 	be_vdev->mlo_dev_ctxt = NULL;
 
 	/* Save vdev stats in MLO dev ctx */
-	dp_update_mlo_ctxt_stats(&mlo_dev_ctxt->stats, &vdev->stats);
+	dp_update_mlo_mld_vdev_ctxt_stats(&mlo_dev_ctxt->stats, &vdev->stats);
 
 	/* reset vdev stats to zero */
-	qdf_mem_set(&vdev->stats, sizeof(struct cdp_vdev_stats), 0);
+	qdf_mem_set(&vdev->stats, sizeof(struct dp_vdev_stats), 0);
 
 	/* unref for mlo ctxt removed from be_vdev*/
 	dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_CHILD);
@@ -484,7 +484,7 @@ struct dp_mlo_dev_ctxt {
 	qdf_atomic_t ref_cnt;
 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
 	uint8_t ref_delete_pending;
-	struct cdp_vdev_stats stats;
+	struct dp_vdev_stats stats;
 };
 #endif /* WLAN_FEATURE_11BE_MLO */
 
@@ -868,19 +868,37 @@ static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
 }
 
 /**
- * dp_update_mlo_ctxt_stats() - aggregate stats from mlo ctx
+ * dp_update_mlo_mld_vdev_ctxt_stats() - aggregate stats from mlo ctx
  * @buf: vdev stats buf
  * @mlo_ctxt_stats: mlo ctxt stats
  *
  * return: void
  */
 static inline
-void dp_update_mlo_ctxt_stats(void *buf,
-			      struct cdp_vdev_stats *mlo_ctxt_stats)
+void dp_update_mlo_mld_vdev_ctxt_stats(void *buf,
+				       struct dp_vdev_stats *mlo_ctxt_stats)
 {
+	struct dp_vdev_stats *tgt_vdev_stats = (struct dp_vdev_stats *)buf;
+
+	DP_UPDATE_TO_MLD_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats,
+				    DP_XMIT_TOTAL);
+}
+
+/**
+ * dp_update_mlo_link_vdev_ctxt_stats() - aggregate stats from mlo ctx
+ * @buf: vdev stats buf
+ * @mlo_ctxt_stats: mlo ctxt stats
+ * @xmit_type: xmit type of packet - MLD/Link
+ * return: void
+ */
+static inline
+void dp_update_mlo_link_vdev_ctxt_stats(void *buf,
+					struct dp_vdev_stats *mlo_ctxt_stats,
+					enum dp_pkt_xmit_type xmit_type)
+{
 	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)buf;
 
-	DP_UPDATE_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats);
+	DP_UPDATE_TO_LINK_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats, xmit_type);
 }
 
 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
@@ -695,6 +695,11 @@ static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
 					     DP_MOD_ID_GENERIC_STATS,
 					     DP_LINK_VDEV_ITER,
 					     DP_VDEV_ITERATE_SKIP_SELF);
+
+		/* Aggregate vdev stats from MLO ctx for detached MLO Links */
+		dp_update_mlo_link_vdev_ctxt_stats(buf,
+						   &vdev_be->mlo_dev_ctxt->stats,
+						   DP_XMIT_MLD);
 	} else {
 		dp_aggregate_interface_stats(vdev, buf);
 
@@ -707,10 +712,12 @@ QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
 					     DP_MOD_ID_GENERIC_STATS,
 					     DP_LINK_VDEV_ITER,
 					     DP_VDEV_ITERATE_SKIP_SELF);
 	}
 
 	/* Aggregate vdev stats from MLO ctx for detached MLO Links */
-	dp_update_mlo_ctxt_stats(buf, &vdev_be->mlo_dev_ctxt->stats);
+	dp_update_mlo_link_vdev_ctxt_stats(buf,
+					   &vdev_be->mlo_dev_ctxt->stats,
+					   DP_XMIT_TOTAL);
 	}
 
 complete:
 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
@@ -2440,12 +2440,170 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
 		DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj); \
 	} while (0)
 
-#define DP_UPDATE_VDEV_STATS(_tgtobj, _srcobj) \
+#define DP_UPDATE_MLD_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type) \
 	do { \
-		DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj); \
+		uint8_t i = 0; \
+		uint8_t idx = 0; \
+		enum dp_pkt_xmit_type temp_xmit_type = _xmit_type; \
+		if (temp_xmit_type == DP_XMIT_MLD) { \
+			idx = DP_VDEV_XMIT_TYPE; \
+			temp_xmit_type = DP_VDEV_XMIT_TYPE; \
+		} else if (temp_xmit_type == DP_XMIT_TOTAL) { \
+			temp_xmit_type = DP_VDEV_XMIT_TYPE; \
+		} \
+		for (; idx <= temp_xmit_type; idx++) { \
+			_tgtobj->tx_i[idx].rcvd.num += _srcobj->tx_i[idx].rcvd.num; \
+			_tgtobj->tx_i[idx].rcvd.bytes += \
+				_srcobj->tx_i[idx].rcvd.bytes; \
+			_tgtobj->tx_i[idx].rcvd_in_fast_xmit_flow += \
+				_srcobj->tx_i[idx].rcvd_in_fast_xmit_flow; \
+			for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \
+				_tgtobj->tx_i[idx].rcvd_per_core[i] += \
+					_srcobj->tx_i[idx].rcvd_per_core[i]; \
+			} \
+			_tgtobj->tx_i[idx].processed.num += \
+				_srcobj->tx_i[idx].processed.num; \
+			_tgtobj->tx_i[idx].processed.bytes += \
+				_srcobj->tx_i[idx].processed.bytes; \
+			_tgtobj->tx_i[idx].reinject_pkts.num += \
+				_srcobj->tx_i[idx].reinject_pkts.num; \
+			_tgtobj->tx_i[idx].reinject_pkts.bytes += \
+				_srcobj->tx_i[idx].reinject_pkts.bytes; \
+			_tgtobj->tx_i[idx].inspect_pkts.num += \
+				_srcobj->tx_i[idx].inspect_pkts.num; \
+			_tgtobj->tx_i[idx].inspect_pkts.bytes += \
+				_srcobj->tx_i[idx].inspect_pkts.bytes; \
+			_tgtobj->tx_i[idx].nawds_mcast.num += \
+				_srcobj->tx_i[idx].nawds_mcast.num; \
+			_tgtobj->tx_i[idx].nawds_mcast.bytes += \
+				_srcobj->tx_i[idx].nawds_mcast.bytes; \
+			_tgtobj->tx_i[idx].bcast.num += \
+				_srcobj->tx_i[idx].bcast.num; \
+			_tgtobj->tx_i[idx].bcast.bytes += \
+				_srcobj->tx_i[idx].bcast.bytes; \
+			_tgtobj->tx_i[idx].raw.raw_pkt.num += \
+				_srcobj->tx_i[idx].raw.raw_pkt.num; \
+			_tgtobj->tx_i[idx].raw.raw_pkt.bytes += \
+				_srcobj->tx_i[idx].raw.raw_pkt.bytes; \
+			_tgtobj->tx_i[idx].raw.dma_map_error += \
+				_srcobj->tx_i[idx].raw.dma_map_error; \
+			_tgtobj->tx_i[idx].raw.invalid_raw_pkt_datatype += \
+				_srcobj->tx_i[idx].raw.invalid_raw_pkt_datatype; \
+			_tgtobj->tx_i[idx].raw.num_frags_overflow_err += \
+				_srcobj->tx_i[idx].raw.num_frags_overflow_err; \
+			_tgtobj->tx_i[idx].sg.sg_pkt.num += \
+				_srcobj->tx_i[idx].sg.sg_pkt.num; \
+			_tgtobj->tx_i[idx].sg.sg_pkt.bytes += \
+				_srcobj->tx_i[idx].sg.sg_pkt.bytes; \
+			_tgtobj->tx_i[idx].sg.non_sg_pkts.num += \
+				_srcobj->tx_i[idx].sg.non_sg_pkts.num; \
+			_tgtobj->tx_i[idx].sg.non_sg_pkts.bytes += \
+				_srcobj->tx_i[idx].sg.non_sg_pkts.bytes; \
+			_tgtobj->tx_i[idx].sg.dropped_host.num += \
+				_srcobj->tx_i[idx].sg.dropped_host.num; \
+			_tgtobj->tx_i[idx].sg.dropped_host.bytes += \
+				_srcobj->tx_i[idx].sg.dropped_host.bytes; \
+			_tgtobj->tx_i[idx].sg.dropped_target += \
+				_srcobj->tx_i[idx].sg.dropped_target; \
+			_tgtobj->tx_i[idx].sg.dma_map_error += \
+				_srcobj->tx_i[idx].sg.dma_map_error; \
+			_tgtobj->tx_i[idx].mcast_en.mcast_pkt.num += \
+				_srcobj->tx_i[idx].mcast_en.mcast_pkt.num; \
+			_tgtobj->tx_i[idx].mcast_en.mcast_pkt.bytes += \
+				_srcobj->tx_i[idx].mcast_en.mcast_pkt.bytes; \
+			_tgtobj->tx_i[idx].mcast_en.dropped_map_error += \
+				_srcobj->tx_i[idx].mcast_en.dropped_map_error; \
+			_tgtobj->tx_i[idx].mcast_en.dropped_self_mac += \
+				_srcobj->tx_i[idx].mcast_en.dropped_self_mac; \
+			_tgtobj->tx_i[idx].mcast_en.dropped_send_fail += \
+				_srcobj->tx_i[idx].mcast_en.dropped_send_fail; \
+			_tgtobj->tx_i[idx].mcast_en.ucast += \
+				_srcobj->tx_i[idx].mcast_en.ucast; \
+			_tgtobj->tx_i[idx].mcast_en.fail_seg_alloc += \
+				_srcobj->tx_i[idx].mcast_en.fail_seg_alloc; \
+			_tgtobj->tx_i[idx].mcast_en.clone_fail += \
+				_srcobj->tx_i[idx].mcast_en.clone_fail; \
+			_tgtobj->tx_i[idx].igmp_mcast_en.igmp_rcvd += \
+				_srcobj->tx_i[idx].igmp_mcast_en.igmp_rcvd; \
+			_tgtobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted += \
+				_srcobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted; \
+			_tgtobj->tx_i[idx].dropped.desc_na.num += \
+				_srcobj->tx_i[idx].dropped.desc_na.num; \
+			_tgtobj->tx_i[idx].dropped.desc_na.bytes += \
+				_srcobj->tx_i[idx].dropped.desc_na.bytes; \
+			_tgtobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num += \
+				_srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num; \
+			_tgtobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes += \
+				_srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes; \
+			_tgtobj->tx_i[idx].dropped.desc_na_exc_outstand.num += \
+				_srcobj->tx_i[idx].dropped.desc_na_exc_outstand.num; \
+			_tgtobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes += \
+				_srcobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes; \
+			_tgtobj->tx_i[idx].dropped.exc_desc_na.num += \
+				_srcobj->tx_i[idx].dropped.exc_desc_na.num; \
+			_tgtobj->tx_i[idx].dropped.exc_desc_na.bytes += \
+				_srcobj->tx_i[idx].dropped.exc_desc_na.bytes; \
+			_tgtobj->tx_i[idx].dropped.ring_full += \
+				_srcobj->tx_i[idx].dropped.ring_full; \
+			_tgtobj->tx_i[idx].dropped.enqueue_fail += \
+				_srcobj->tx_i[idx].dropped.enqueue_fail; \
+			_tgtobj->tx_i[idx].dropped.dma_error += \
+				_srcobj->tx_i[idx].dropped.dma_error; \
+			_tgtobj->tx_i[idx].dropped.res_full += \
+				_srcobj->tx_i[idx].dropped.res_full; \
+			_tgtobj->tx_i[idx].dropped.headroom_insufficient += \
+				_srcobj->tx_i[idx].dropped.headroom_insufficient; \
+			_tgtobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check += \
+				_srcobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check; \
+			_tgtobj->tx_i[idx].dropped.drop_ingress += \
+				_srcobj->tx_i[idx].dropped.drop_ingress; \
+			_tgtobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path += \
+				_srcobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path; \
+			_tgtobj->tx_i[idx].dropped.tx_mcast_drop += \
+				_srcobj->tx_i[idx].dropped.tx_mcast_drop; \
+			_tgtobj->tx_i[idx].dropped.fw2wbm_tx_drop += \
+				_srcobj->tx_i[idx].dropped.fw2wbm_tx_drop; \
+			_tgtobj->tx_i[idx].dropped.dropped_pkt.bytes += \
+				_srcobj->tx_i[idx].dropped.dropped_pkt.bytes; \
+			_tgtobj->tx_i[idx].mesh.exception_fw += \
+				_srcobj->tx_i[idx].mesh.exception_fw; \
+			_tgtobj->tx_i[idx].mesh.completion_fw += \
+				_srcobj->tx_i[idx].mesh.completion_fw; \
+			_tgtobj->tx_i[idx].cce_classified += \
+				_srcobj->tx_i[idx].cce_classified; \
+			_tgtobj->tx_i[idx].cce_classified_raw += \
+				_srcobj->tx_i[idx].cce_classified_raw; \
+			_tgtobj->tx_i[idx].sniffer_rcvd.num += \
+				_srcobj->tx_i[idx].sniffer_rcvd.num; \
+			_tgtobj->tx_i[idx].sniffer_rcvd.bytes += \
+				_srcobj->tx_i[idx].sniffer_rcvd.bytes; \
+			_tgtobj->tx_i[idx].dropped.dropped_pkt.num = \
+				_tgtobj->tx_i[idx].dropped.dma_error + \
+				_tgtobj->tx_i[idx].dropped.ring_full + \
+				_tgtobj->tx_i[idx].dropped.enqueue_fail + \
+				_tgtobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check + \
+				_tgtobj->tx_i[idx].dropped.desc_na.num + \
+				_tgtobj->tx_i[idx].dropped.res_full + \
+				_tgtobj->tx_i[idx].dropped.drop_ingress + \
+				_tgtobj->tx_i[idx].dropped.headroom_insufficient + \
+				_tgtobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path + \
+				_tgtobj->tx_i[idx].dropped.tx_mcast_drop + \
+				_tgtobj->tx_i[idx].dropped.fw2wbm_tx_drop; \
+		} \
 		DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj); \
 	} while (0)
 
+#define DP_UPDATE_TO_MLD_VDEV_STATS(_tgtobj, _srcobj, _xmit_type) \
+	do { \
+		DP_UPDATE_MLD_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type); \
+		DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \
+	} while (0)
+
+#define DP_UPDATE_TO_LINK_VDEV_STATS(_tgtobj, _srcobj, _xmit_type) \
+	do { \
+		DP_UPDATE_LINK_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type); \
+		DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \
+	} while (0)
 /**
  * dp_peer_find_attach() - Allocates memory for peer objects
  * @soc: SoC handle
@@ -6402,13 +6402,20 @@ void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
 		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
 		break;
 	}
-	case UPDATE_VDEV_STATS:
+	case UPDATE_VDEV_STATS_MLD:
 	{
 		struct cdp_vdev_stats *vdev_stats =
 					(struct cdp_vdev_stats *)arg;
 		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
 		break;
 	}
+	case UPDATE_VDEV_STATS:
+	{
+		struct dp_vdev_stats *vdev_stats =
+					(struct dp_vdev_stats *)arg;
+		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
+		break;
+	}
 	default:
 		dp_mon_err("Invalid stats_update_type: %u", type);
 	}