qcacmn: Allocate hif_napi_info structures dynamically

The hif_napi_info structure includes a dummy netdev, which is
large.  Avoiding the unneeded allocations saves 30 KB of memory.
Dynamically allocating the hif_napi_info structures also reduces
the amount of contiguous memory needed for the parent structure.

Change-Id: I58044e5b1d0a834b3b6d17f66d6f4b2462873f2a
CRs-Fixed: 2016355
Author: Houston Hoffman
Date: 2017-03-08 15:57:54 -08:00
Committed by: snandini
Parent: 99868ac3ca
Commit: eab19b3b61
2 changed files with 91 additions and 30 deletions
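The pattern the commit applies: instead of embedding an array of large
structures in the parent, the parent holds an array of pointers, and
each entry is allocated only when the corresponding copy engine
actually needs NAPI.  Below is a minimal, self-contained sketch of
that pattern; the names (pipe_info, parent, create_pipe, destroy_pipe)
and the use of calloc/free are illustrative stand-ins, not the
driver's qdf_mem_malloc-based code.

    #include <stdlib.h>

    #define PIPE_COUNT_MAX 12

    struct pipe_info {              /* stands in for qca_napi_info */
        char big_payload[4096];     /* stands in for the dummy netdev */
    };

    struct parent {                 /* stands in for qca_napi_data */
        unsigned int ce_map;        /* bitmap of created instances */
        struct pipe_info *pipes[PIPE_COUNT_MAX]; /* pointers, not bodies */
    };

    static int create_pipe(struct parent *p, int i)
    {
        struct pipe_info *pi = calloc(1, sizeof(*pi)); /* zeroed, like
                                                        * malloc+memset */
        if (!pi)
            return -1;              /* caller unwinds entries created so far */
        p->pipes[i] = pi;
        p->ce_map |= 1u << i;
        return 0;
    }

    static void destroy_pipe(struct parent *p, int i)
    {
        struct pipe_info *pi = p->pipes[i];

        if (!pi)
            return;                 /* never created: nothing to free */
        p->ce_map &= ~(1u << i);
        p->pipes[i] = NULL;         /* clear the slot before freeing,
                                     * as the commit does */
        free(pi);
    }

With the embedded array, sizeof(struct parent) includes PIPE_COUNT_MAX
full pipe_info bodies whether or not they are used; with the pointer
array it holds only PIPE_COUNT_MAX pointers, which is what shrinks the
contiguous allocation and, for unused pipes, avoids the per-instance
memory entirely.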

hif/inc/hif.h

@@ -263,11 +263,12 @@ struct qca_napi_cpu {
 struct qca_napi_data {
     qdf_spinlock_t lock;
     uint32_t state;
+
     /* bitmap of created/registered NAPI instances, indexed by pipe_id,
      * not used by clients (clients use an id returned by create)
      */
     uint32_t ce_map;
-    struct qca_napi_info napis[CE_COUNT_MAX];
+    struct qca_napi_info *napis[CE_COUNT_MAX];
     struct qca_napi_cpu napi_cpu[NR_CPUS];
     int lilcl_head, bigcl_head;
     enum qca_napi_tput_state napi_mode;

hif/src/hif_napi.c

@@ -137,6 +137,7 @@ int hif_napi_create(struct hif_opaque_softc *hif_ctx,
         rc = hif_napi_cpu_init(hif_ctx);
         if (rc != 0) {
             HIF_ERROR("NAPI_initialization failed,. %d", rc);
+            rc = napid->ce_map;
             goto hnc_err;
         }
@@ -153,8 +154,21 @@ int hif_napi_create(struct hif_opaque_softc *hif_ctx,
             /* Now this is a CE where we need NAPI on */
             NAPI_DEBUG("Creating NAPI on pipe %d", i);
+            napii = qdf_mem_malloc(sizeof(*napii));
+            napid->napis[i] = napii;
+            if (!napii) {
+                NAPI_DEBUG("NAPI alloc failure %d", i);
+                rc = -ENOMEM;
+                goto napii_alloc_failure;
+            }
+        }
-            napii = &(napid->napis[i]);
+
+    for (i = 0; i < hif->ce_count; i++) {
+        napii = napid->napis[i];
+        if (!napii)
+            continue;
+
+        NAPI_DEBUG("initializing NAPI for pipe %d", i);
         memset(napii, 0, sizeof(struct qca_napi_info));
         napii->scale = scale;
         napii->id = NAPI_PIPE2ID(i);
@@ -193,10 +207,21 @@ int hif_napi_create(struct hif_opaque_softc *hif_ctx,
         HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
                 napii->id, i);
     }
+    NAPI_DEBUG("napi map = %x", napid->ce_map);
     NAPI_DEBUG("NAPI ids created for all applicable pipes");
+    return napid->ce_map;
+
+napii_alloc_failure:
+    for (i = 0; i < hif->ce_count; i++) {
+        napii = napid->napis[i];
+        napid->napis[i] = NULL;
+        if (napii)
+            qdf_mem_free(napii);
+    }
+
 hnc_err:
     NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
-    return napid->ce_map;
+    return rc;
 }

 /**
@@ -235,13 +260,23 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
     } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
         HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
                   __func__, id, ce);
+        if (hif->napi_data.napis[ce])
+            HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
+                      __func__, id, ce);
         rc = -EINVAL;
     } else {
         struct qca_napi_data *napid;
         struct qca_napi_info *napii;

         napid = &(hif->napi_data);
-        napii = &(napid->napis[ce]);
+        napii = napid->napis[ce];
+        if (!napii) {
+            if (napid->ce_map & (0x01 << ce))
+                HIF_ERROR("%s: napii & ce_map out of sync(ce %d)",
+                          __func__, ce);
+            return -EINVAL;
+        }
+
         if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
             if (force) {
@@ -269,7 +304,9 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
             netif_napi_del(&(napii->napi));

             napid->ce_map &= ~(0x01 << ce);
+            napid->napis[ce] = NULL;
             napii->scale = 0;
+            qdf_mem_free(napii);
             HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);

             /* if there are no active instances and
@@ -310,9 +347,11 @@ void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
     struct qca_napi_info *napii;

     napid = &(scn->napi_data);
-    napii = &(napid->napis[NAPI_ID2PIPE(napi_id)]);
+    napii = napid->napis[NAPI_ID2PIPE(napi_id)];

+    if (napii)
         return napii->lro_ctx;
+    return 0;
 }

 /**
@@ -535,25 +574,27 @@ int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
     if (prev_state != napid->state) {
         if (napid->state == ENABLE_NAPI_MASK) {
             rc = 1;
-            for (i = 0; i < CE_COUNT_MAX; i++)
-                if ((napid->ce_map & (0x01 << i))) {
-                    napi = &(napid->napis[i].napi);
+            for (i = 0; i < CE_COUNT_MAX; i++) {
+                struct qca_napi_info *napii = napid->napis[i];
+                if (napii) {
+                    napi = &(napii->napi);
                     NAPI_DEBUG("%s: enabling NAPI %d",
                                __func__, i);
                     napi_enable(napi);
                 }
+            }
         } else {
             rc = 0;
-            for (i = 0; i < CE_COUNT_MAX; i++)
-                if (napid->ce_map & (0x01 << i)) {
-                    napi = &(napid->napis[i].napi);
+            for (i = 0; i < CE_COUNT_MAX; i++) {
+                struct qca_napi_info *napii = napid->napis[i];
+                if (napii) {
+                    napi = &(napii->napi);
                     NAPI_DEBUG("%s: disabling NAPI %d",
                                __func__, i);
                     napi_disable(napi);
                     /* in case it is affined, remove it */
-                    irq_set_affinity_hint(
-                        napid->napis[i].irq,
-                        NULL);
+                    irq_set_affinity_hint(napii->irq, NULL);
                 }
+            }
         }
     } else {
@@ -612,14 +653,22 @@ int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
 {
     int cpu = smp_processor_id();
     struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+    struct qca_napi_info *napii;

     hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
                              NULL, NULL, 0);
-    scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
-    NAPI_DEBUG("scheduling napi %d (ce:%d)",
-               scn->napi_data.napis[ce_id].id, ce_id);
-    napi_schedule(&(scn->napi_data.napis[ce_id].napi));
+
+    napii = scn->napi_data.napis[ce_id];
+    if (qdf_unlikely(!napii)) {
+        HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
+                  __func__, ce_id);
+        qdf_atomic_dec(&scn->active_tasklet_cnt);
+        return false;
+    }
+
+    napii->stats[cpu].napi_schedules++;
+    NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
+    napi_schedule(&(napii->napi));

     return true;
 }
@@ -703,9 +752,6 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
     struct qca_napi_info *napi_info;
     struct CE_state *ce_state = NULL;

-    NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
-               __func__, napi_info->id, napi_info->irq, budget);
-
     if (unlikely(NULL == hif)) {
         HIF_ERROR("%s: hif context is NULL", __func__);
         QDF_ASSERT(0);
@@ -715,6 +761,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
     napi_info = (struct qca_napi_info *)
         container_of(napi, struct qca_napi_info, napi);
+
+    NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
+               __func__, napi_info->id, napi_info->irq, budget);
     napi_info->stats[cpu].napi_polls++;

     hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
@@ -838,13 +887,16 @@ void hif_napi_update_yield_stats(struct CE_state *ce_state,
         return;
     }

+    if (unlikely(NULL == napi_data->napis[ce_id]))
+        return;
+
     ce_id = ce_state->id;
     cpu_id = qdf_get_cpu();

     if (time_limit_reached)
-        napi_data->napis[ce_id].stats[cpu_id].time_limit_reached++;
+        napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
     else
-        napi_data->napis[ce_id].stats[cpu_id].rxpkt_thresh_reached++;
+        napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
 }

 /**
@@ -1252,15 +1304,17 @@ static int hncm_migrate_to(struct qca_napi_data *napid,
     NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);

     cpumask.bits[0] = (1 << didx);
+    if (!napid->napis[napi_ce])
+        return -EINVAL;
-    irq_modify_status(napid->napis[napi_ce].irq, IRQ_NO_BALANCING, 0);
-    rc = irq_set_affinity_hint(napid->napis[napi_ce].irq, &cpumask);
+    irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
+    rc = irq_set_affinity_hint(napid->napis[napi_ce]->irq, &cpumask);

     /* unmark the napis bitmap in the cpu table */
-    napid->napi_cpu[napid->napis[napi_ce].cpu].napis &= ~(0x01 << napi_ce);
+    napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);

     /* mark the napis bitmap for the new designated cpu */
     napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
-    napid->napis[napi_ce].cpu = didx;
+    napid->napis[napi_ce]->cpu = didx;

     NAPI_DEBUG("<--%s[%d]", __func__, rc);
     return rc;
@@ -1424,17 +1478,23 @@ hncm_return:
 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
 {
     int i;
+    struct qca_napi_info *napii;

     for (i = 0; i < CE_COUNT_MAX; i++) {
         /* check if NAPI is enabled on the CE */
         if (!(napid->ce_map & (0x01 << i)))
             continue;

+        /*double check that NAPI is allocated for the CE */
+        napii = napid->napis[i];
+        if (!(napii))
+            continue;
+
         if (bl_flag == true)
-            irq_modify_status(napid->napis[i].irq,
+            irq_modify_status(napii->irq,
                               0, IRQ_NO_BALANCING);
         else
-            irq_modify_status(napid->napis[i].irq,
+            irq_modify_status(napii->irq,
                               IRQ_NO_BALANCING, 0);

         HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
     }