drivers: Remove explicit invocations of mmiowb()
mmiowb() is now implied by spin_unlock() on architectures that require it, so there is no reason to call it from driver code. This patch was generated using coccinelle:

	@mmiowb@
	@@
	- mmiowb();

and invoked as:

	$ for d in drivers include/linux/qed sound; do \
	  spatch --include-headers --sp-file mmiowb.cocci --dir $d --in-place; done

NOTE: mmiowb() has only ever guaranteed ordering in conjunction with spin_unlock(). However, pairing each mmiowb() removal in this patch with the corresponding call to spin_unlock() is not at all trivial, so there is a small chance that this change may regress any drivers incorrectly relying on mmiowb() to order MMIO writes between CPUs using lock-free synchronisation.

If you've ended up bisecting to this commit, you can reintroduce the mmiowb() calls using wmb() instead, which should restore the old behaviour on all architectures other than some esoteric ia64 systems.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
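For reference, the wmb() fallback suggested above amounts to something like the sketch below. This is a minimal illustration only, not code from this patch: foo_ring_doorbell(), struct foo_device and FOO_DB_OFFSET are hypothetical names invented for the example.

	#include <linux/io.h>	/* writel(); wmb() comes in via the usual kernel headers */

	/* Hypothetical device private data; only the doorbell mapping matters here. */
	struct foo_device {
		void __iomem *db_base;
	};

	#define FOO_DB_OFFSET	0x40	/* made-up doorbell register offset */

	static void foo_ring_doorbell(struct foo_device *dev, u32 prod_idx)
	{
		/* Post the new producer index to the device doorbell register. */
		writel(prod_idx, dev->db_base + FOO_DB_OFFSET);

		/*
		 * Previously: mmiowb();
		 * Per the bisection advice in the commit message, wmb() stands in
		 * for the removed mmiowb() when MMIO writes are ordered between
		 * CPUs without a paired spin_unlock().
		 */
		wmb();
	}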
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -62,8 +62,7 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
 			((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
 		writel((__bfa)->iocfc.req_cq_pi[__reqq], \
 			(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
-		mmiowb(); \
-	} while (0)
+	} while (0)
 
 #define bfa_rspq_pi(__bfa, __rspq) \
 	(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))

--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -61,7 +61,6 @@ bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
 
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 void
@@ -72,7 +71,6 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
 
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 void

--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -81,7 +81,6 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
 
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 /*
@@ -94,7 +93,6 @@ bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
 {
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 void

--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -991,7 +991,6 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
 			FCOE_CQE_TOGGLE_BIT_SHIFT);
 	msg = *((u32 *)rx_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-	mmiowb();
 
 }
 
@@ -1409,7 +1408,6 @@ void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
 			(tgt->sq_curr_toggle_bit << 15);
 	msg = *((u32 *)sq_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-	mmiowb();
 
 }
 

--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -253,7 +253,6 @@ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
 		writew(ep->qp.rq_prod_idx,
 		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
 	}
-	mmiowb();
 }
 
 
@@ -279,8 +278,6 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
 		bnx2i_ring_577xx_doorbell(bnx2i_conn);
 	} else
 		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
-
-	mmiowb();
 }
 
 

--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -815,7 +815,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
 	       &(regs)->inbound_high_queue_port);
 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
 	       &(regs)->inbound_low_queue_port);
-	mmiowb();
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
 

--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -242,7 +242,6 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
 		&instance->reg_set->inbound_low_queue_port);
 	writel(le32_to_cpu(req_desc->u.high),
 		&instance->reg_set->inbound_high_queue_port);
-	mmiowb();
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 #endif
 }

--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3333,7 +3333,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 	spin_lock_irqsave(writeq_lock, flags);
 	__raw_writel((u32)(b), addr);
 	__raw_writel((u32)(b >> 32), (addr + 4));
-	mmiowb();
 	spin_unlock_irqrestore(writeq_lock, flags);
 }
 

--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -807,7 +807,6 @@ void qedf_ring_doorbell(struct qedf_rport *fcport)
 	writel(*(u32 *)&dbell, fcport->p_doorbell);
 	/* Make sure SQ index is updated so f/w prcesses requests in order */
 	wmb();
-	mmiowb();
 }
 
 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,

--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -985,7 +985,6 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
 	 * others they are two different assembly operations.
 	 */
 	wmb();
-	mmiowb();
 	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
 		  "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
 		  qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,

--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -3004,8 +3004,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 		sp->flags |= SRB_SENT;
 		ha->actthreads++;
 		WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-		/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
-		mmiowb();
 
  out:
 	if (status)
@@ -3254,8 +3252,6 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 		sp->flags |= SRB_SENT;
 		ha->actthreads++;
 		WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-		/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
-		mmiowb();
 
  out:
 	if (status)
@@ -3379,7 +3375,6 @@ qla1280_isp_cmd(struct scsi_qla_host *ha)
 	 * See Documentation/driver-api/device-io.rst for more information.
 	 */
 	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-	mmiowb();
 
 	LEAVE("qla1280_isp_cmd");
 }