RDMA: Update workqueue usage
* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue.  All system workqueue usages
  including flush_scheduled_work() callers are converted to use and
  flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() converted to
  cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
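The conversion is mechanical and the same pattern repeats in every hunk below. As a rough sketch (a hypothetical driver fragment, not taken from this patch; it assumes ib_wq is the workqueue pointer exported by the RDMA core, which is not shown in this excerpt), the substitutions look like this:

	/* Illustrative only: foo_port and the functions are made up. */
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	extern struct workqueue_struct *ib_wq;	/* provided by the RDMA core */

	struct foo_port {
		struct delayed_work autoneg_work;
		struct work_struct  qsfp_work;
	};

	static void foo_start(struct foo_port *p)
	{
		/* was: schedule_delayed_work(&p->autoneg_work, msecs_to_jiffies(2)); */
		queue_delayed_work(ib_wq, &p->autoneg_work, msecs_to_jiffies(2));

		/* was: schedule_work(&p->qsfp_work); */
		queue_work(ib_wq, &p->qsfp_work);
	}

	static void foo_stop(struct foo_port *p)
	{
		/*
		 * was: cancel_delayed_work(&p->autoneg_work);
		 *      flush_scheduled_work();
		 */
		cancel_delayed_work_sync(&p->autoneg_work);

		/* was: flush_scheduled_work(); */
		flush_workqueue(ib_wq);
	}

cancel_delayed_work_sync() both cancels the item and waits for a running instance to finish, so the separate flush of the system workqueue is no longer needed in the cancel paths.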
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work(&ppd->cpspec->ipg_work);
-	flush_scheduled_work();
+		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				schedule_work(&qd->work);
+				queue_work(ib_wq, &qd->work);
 			}
 		}
 	}
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 	ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+			   msecs_to_jiffies(delay));
 }
 
 /*
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -1044,24 +1043,10 @@ static int __init qlogic_ib_init(void)
 	if (ret)
 		goto bail;
 
-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed. We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}
 
 	/*
@@ -1091,8 +1076,6 @@ bail_unit:
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
@@ -1116,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
 	pci_unregister_driver(&qib_driver);
 
-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);
 
 	qib_cpulist_count = 0;
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
 	qib_stop_timers(dd);
 
-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);
 
 	ret = qibfs_remove(dd);
 	if (ret)
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 		goto bail;
 	/* We see a module, but it may be unwise to look yet. Just schedule */
 	qd->t_insert = get_jiffies_64();
-	schedule_work(&qd->work);
+	queue_work(ib_wq, &qd->work);
 bail:
 	return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 	/*
-	 * There is nothing to do here for now. our
-	 * work is scheduled with schedule_work(), and
-	 * flush_scheduled_work() from remove_one will
-	 * block until all work ssetup with schedule_work()
+	 * There is nothing to do here for now. our work is scheduled
+	 * with queue_work(), and flush_workqueue() from remove_one
+	 * will block until all work setup with queue_work()
 	 * completes.
 	 */
 }
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
 		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
 	if (qib_send_ok(qp))
-		queue_work(qib_wq, &qp->s_work);
+		queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)