drivers: Remove explicit invocations of mmiowb()
mmiowb() is now implied by spin_unlock() on architectures that require
it, so there is no reason to call it from driver code. This patch was
generated using coccinelle:

	@mmiowb@
	@@
	- mmiowb();

and invoked as:

	$ for d in drivers include/linux/qed sound; do \
	  spatch --include-headers --sp-file mmiowb.cocci --dir $d --in-place; done

NOTE: mmiowb() has only ever guaranteed ordering in conjunction with
spin_unlock(). However, pairing each mmiowb() removal in this patch
with the corresponding call to spin_unlock() is not at all trivial, so
there is a small chance that this change may regress any drivers
incorrectly relying on mmiowb() to order MMIO writes between CPUs using
lock-free synchronisation.

If you've ended up bisecting to this commit, you can reintroduce the
mmiowb() calls using wmb() instead, which should restore the old
behaviour on all architectures other than some esoteric ia64 systems.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
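To make the change concrete, here is a minimal sketch of the driver
pattern this patch makes redundant. The device structure, lock, and
register names below are invented for illustration and do not appear in
any driver touched by this commit:

	#include <linux/io.h>
	#include <linux/spinlock.h>

	struct foo_dev {
		void __iomem *db_reg;	/* hypothetical doorbell register */
		spinlock_t lock;
	};

	static void foo_ring_doorbell(struct foo_dev *dev, u32 val)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		writel(val, dev->db_reg);	/* posted MMIO write */
		/* Old code needed mmiowb() here so this doorbell could
		 * not be overtaken by one written by the next lock
		 * holder on another CPU.  spin_unlock() now provides
		 * that ordering on architectures that need it, so the
		 * barrier is simply deleted.
		 */
		spin_unlock_irqrestore(&dev->lock, flags);
	}

Because the guarantee now lives in spin_unlock() itself, any driver
that rings its doorbell under a lock gets the old mmiowb() semantics
for free; only code ordering MMIO across CPUs without a lock, as the
NOTE above warns, can be affected.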
@@ -774,18 +774,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
 {
 	u16 rc = 0, index;
 
-	/* Make certain HW write took affect */
-	mmiowb();
-
 	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
 	if (p_sb_desc->index != index) {
 		p_sb_desc->index = index;
 		rc = QED_SB_ATT_IDX;
 	}
 
-	/* Make certain we got a consistent view with HW */
-	mmiowb();
-
 	return rc;
 }
 
@@ -1170,7 +1164,6 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
 	/* Both segments (interrupts & acks) are written to same place address;
 	 * Need to guarantee all commands will be received (in-order) by HW.
 	 */
-	mmiowb();
 	barrier();
 }
 
@@ -1805,9 +1798,6 @@ static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
 
-	/* Flush the writes to IGU */
-	mmiowb();
-
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
 }
@@ -1871,9 +1861,6 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
 
 	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
 
-	/* Flush the write to IGU */
-	mmiowb();
-
 	/* calculate where to read the status bit from */
 	sb_bit = 1 << (igu_sb_id % 32);
 	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
@@ -341,9 +341,6 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
 
 	REG_WR16(p_hwfn, addr, prod);
-
-	/* keep prod updates ordered */
-	mmiowb();
 }
 
 int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
@@ -1526,14 +1526,6 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	barrier();
 	writel(txq->tx_db.raw, txq->doorbell_addr);
 
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the queue lock is released and another start_xmit is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
-
 	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
 		if (qede_txq_has_work(txq))
 			break;
@@ -580,14 +580,6 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
 
 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
 			(u32 *)&rx_prods);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the napi lock is released and another qede_poll is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
 }
 
 static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
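The comment deleted in the two qede hunks above is the clearest
statement of what mmiowb() guarded against: a doorbell written under a
lock on one CPU being overtaken by a doorbell written by the next lock
holder, observable on ia64/Altix. If a bisection ever lands on this
commit, the message above suggests wmb() as a stand-in; a hedged sketch
reusing the qede field names from the hunk above (the helper function
itself is invented, not part of this patch):

	/* Hypothetical helper, not taken from the qede driver */
	static void qede_ring_tx_doorbell(struct qede_tx_queue *txq)
	{
		/* descriptor writes must be visible before the doorbell */
		wmb();
		barrier();
		writel(txq->tx_db.raw, txq->doorbell_addr);
		/* Where mmiowb() used to sit: per the commit message, a
		 * wmb() here restores the old ordering on all
		 * architectures other than some esoteric ia64 systems.
		 */
		wmb();
	}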
@@ -1858,7 +1858,6 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
 		wmb();
 		writel_relaxed(qdev->small_buf_q_producer_index,
 			       &port_regs->CommonRegs.rxSmallQProducerIndex);
-		mmiowb();
 	}
 }
 
@@ -2181,7 +2181,6 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
 static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 {
 	writel(val, addr);
-	mmiowb();
 }
 
 /*
@@ -2695,7 +2695,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	wmb();
 
 	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
-	mmiowb();
 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 		     "tx queued, slot %d, len %d\n",
 		     tx_ring->prod_idx, skb->len);