iwlwifi: use threaded interrupt handler

With new transports coming up, move to threaded
interrupt handling now. This has the advantage
that we can use the same locking scheme with all
different transports we may need to implement.
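
For context, the pattern the patch moves to: request_threaded_irq()
registers a hard-irq half that only checks and masks the interrupt,
plus a thread function that the IRQ core wakes whenever the hard
half returns IRQ_WAKE_THREAD. A minimal sketch, assuming a
hypothetical my_dev device and helpers (only the
request_threaded_irq() API itself is real):

	#include <linux/interrupt.h>

	struct my_dev { int dummy; };	/* hypothetical device state */

	static bool my_dev_has_work(struct my_dev *d) { return true; }	/* stub */
	static void my_dev_mask_ints(struct my_dev *d) { }		/* stub */
	static void my_dev_service(struct my_dev *d) { }		/* stub */
	static void my_dev_unmask_ints(struct my_dev *d) { }		/* stub */

	/* hard-irq half: interrupt context, do the minimum and defer */
	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		if (!my_dev_has_work(dev))
			return IRQ_NONE;	/* shared line, not ours */

		my_dev_mask_ints(dev);		/* quiet the device */
		return IRQ_WAKE_THREAD;		/* run the thread fn */
	}

	/* threaded half: process context, may sleep, and can share one
	 * locking scheme with the rest of the driver */
	static irqreturn_t my_irq_thread(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		my_dev_service(dev);
		my_dev_unmask_ints(dev);
		return IRQ_HANDLED;
	}

	/* registration, e.g. in probe():
	 *	request_threaded_irq(irq, my_isr, my_irq_thread,
	 *			     IRQF_SHARED, "my_dev", dev);
	 */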

Note that the TX path obviously still runs in a
tasklet, so some spin_lock() calls need to change
to spin_lock_bh() calls to properly lock out the
TX path.
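
The constraint behind the _bh conversion, sketched with a
hypothetical lock standing in for txq->lock: the TX tasklet runs
in softirq context, so process-context code (the irq thread) must
disable bottom halves while holding any lock the tasklet also
takes, or the tasklet can interrupt the lock holder on the same
CPU and spin on the lock forever:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(txq_lock);	/* hypothetical */

	/* tasklet (softirq) side, e.g. the TX path: bottom halves
	 * are already off here, plain spin_lock() is enough */
	static void tx_tasklet_fn(unsigned long data)
	{
		spin_lock(&txq_lock);
		/* ... transmit / reclaim ... */
		spin_unlock(&txq_lock);
	}

	/* process-context side, e.g. the irq thread: disable
	 * bottom halves first so the tasklet cannot deadlock us */
	static void handle_completions(void)
	{
		spin_lock_bh(&txq_lock);
		/* ... complete commands, update pointers ... */
		spin_unlock_bh(&txq_lock);
	}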

In my test on a Calpella platform this has no
impact on throughput or latency.

Also add lockdep annotations to avoid lockups,
by catching attempts to send synchronous commands,
or to take locks that connect with them, from the
irq thread.
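
The mechanism, visible in the hunks below as trans_lockdep_init()
and the lock_map_acquire()/lock_map_release() calls on
sync_cmd_lockdep_map: a "virtual lock" is held across the whole
irq thread and acquired again around every synchronous command, so
lockdep reports the recursion instead of the driver silently
deadlocking. A sketch of the usual lockdep-map pattern; the macro
body and the send-command side are assumptions, only the names
come from the diff:

	#include <linux/lockdep.h>

	struct iwl_trans {			/* other fields elided */
		struct lockdep_map sync_cmd_lockdep_map;
	};

	#define trans_lockdep_init(trans)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		lockdep_init_map(&(trans)->sync_cmd_lockdep_map,	\
				 "sync_cmd_lockdep_map", &__key, 0);	\
	} while (0)

	/* irq thread (iwl_pcie_irq_handler below):
	 *	lock_map_acquire(&trans->sync_cmd_lockdep_map);
	 *	... service interrupts ...
	 *	lock_map_release(&trans->sync_cmd_lockdep_map);
	 *
	 * synchronous command path (assumed counterpart):
	 *	lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
	 *	... send command, sleep for completion ...
	 *	lock_map_release(&trans->sync_cmd_lockdep_map);
	 *
	 * A sync command sent from the irq thread acquires the map
	 * recursively, which lockdep flags; at runtime it would
	 * deadlock, since only this thread can process the command
	 * completion it is waiting for.
	 */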

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
commit 2bfb50924c
parent c9f7a8ab77
Author: Johannes Berg
Date:   2012-12-27 21:43:48 +01:00

11 files changed, 85 insertions(+), 52 deletions(-)

diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -249,7 +249,6 @@ struct iwl_trans_pcie {
 	int ict_index;
 	u32 inta;
 	bool use_ict;
-	struct tasklet_struct irq_tasklet;
 	struct isr_statistics isr_stats;
 	spinlock_t irq_lock;
@@ -330,7 +329,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 * RX
 ******************************************************/
 int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
 int iwl_pcie_rx_stop(struct iwl_trans *trans);
 void iwl_pcie_rx_free(struct iwl_trans *trans);

diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -81,10 +81,10 @@
  *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * Driver sequence:
@@ -214,9 +214,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	/*
 	 * If the device isn't enabled - not need to try to add buffers...
 	 * This can happen when we stop the device and still have an interrupt
-	 * pending. We stop the APM before we sync the interrupts / tasklets
-	 * because we have to (see comment there). On the other hand, since
-	 * the APM is stopped, we cannot access the HW (in particular not prph).
+	 * pending. We stop the APM before we sync the interrupts because we
+	 * have to (see comment there). On the other hand, since the APM is
+	 * stopped, we cannot access the HW (in particular not prph).
 	 * So don't try to restock if the APM has been already stopped.
 	 */
 	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
@@ -796,11 +796,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 	wake_up(&trans_pcie->wait_command_queue);

+	local_bh_disable();
 	iwl_op_mode_nic_error(trans->op_mode);
+	local_bh_enable();
 }

-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 {
+	struct iwl_trans *trans = dev_id;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta = 0;
@@ -811,6 +814,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
 	u32 inta_mask;
 #endif

+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -855,7 +860,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
 		handled |= CSR_INT_BIT_HW_ERR;

-		return;
+		goto out;
 	}

 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -1005,6 +1010,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
 	/* Re-enable RF_KILL if it occurred */
 	else if (handled & CSR_INT_BIT_RF_KILL)
 		iwl_enable_rfkill_int(trans);
+
+out:
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+	return IRQ_HANDLED;
 }

 /******************************************************************************
@@ -1127,7 +1136,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 	/* Disable (but don't clear!) interrupts here to avoid
 	 * back-to-back ISRs and sporadic interrupts from our NIC.
-	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we have something to service, the irq thread will re-enable ints.
 	 * If we *don't* have something, we'll re-enable before leaving here. */
 	inta_mask = iwl_read32(trans, CSR_INT_MASK);
 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -1167,9 +1176,9 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 #endif

 	trans_pcie->inta |= inta;
-	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
+	/* the thread will service interrupts and re-enable them */
 	if (likely(inta))
-		tasklet_schedule(&trans_pcie->irq_tasklet);
+		return IRQ_WAKE_THREAD;
 	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
 		 !trans_pcie->inta)
 		iwl_enable_interrupts(trans);
@@ -1277,9 +1286,10 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 	trans_pcie->inta |= inta;

-	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
-	if (likely(inta))
-		tasklet_schedule(&trans_pcie->irq_tasklet);
-	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+	if (likely(inta)) {
+		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+		return IRQ_WAKE_THREAD;
+	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
 		 !trans_pcie->inta) {
 		/* Allow interrupt if was disabled by this handler and
 		 * no tasklet was schedules, We should not enable interrupt,

diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -760,7 +760,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	synchronize_irq(trans_pcie->pci_dev->irq);
-	tasklet_kill(&trans_pcie->irq_tasklet);

 	iwl_pcie_tx_free(trans);
 	iwl_pcie_rx_free(trans);
@@ -1480,6 +1479,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans->ops = &trans_ops_pcie;
 	trans->cfg = cfg;
+	trans_lockdep_init(trans);
 	trans_pcie->trans = trans;
 	spin_lock_init(&trans_pcie->irq_lock);
 	spin_lock_init(&trans_pcie->reg_lock);
@@ -1567,15 +1567,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->inta_mask = CSR_INI_SET_MASK;

-	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-		     iwl_pcie_tasklet, (unsigned long)trans);
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;

-	err = request_irq(pdev->irq, iwl_pcie_isr_ict,
-			  IRQF_SHARED, DRV_NAME, trans);
-	if (err) {
+	if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+				 iwl_pcie_irq_handler,
+				 IRQF_SHARED, DRV_NAME, trans)) {
 		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
 		goto out_free_ict;
 	}

diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -926,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
 		return;

-	spin_lock(&txq->lock);
+	spin_lock_bh(&txq->lock);

 	if (txq->q.read_ptr == tfd_num)
 		goto out;
@@ -970,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
 		iwl_wake_queue(trans, txq);
 out:
-	spin_unlock(&txq->lock);
+	spin_unlock_bh(&txq->lock);
 }

 /*
@@ -1371,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 		return;
 	}

-	spin_lock(&txq->lock);
+	spin_lock_bh(&txq->lock);

 	cmd_index = get_cmd_index(&txq->q, index);
 	cmd = txq->entries[cmd_index].cmd;
@@ -1405,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	meta->flags = 0;

-	spin_unlock(&txq->lock);
+	spin_unlock_bh(&txq->lock);
 }

 #define HOST_COMPLETE_TIMEOUT	(2 * HZ)