@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q)
 	q->qdio_error = 0;
 }
 
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+		tasklet_schedule(&q->tasklet);
+		return 0;
+	}
+	return -EPERM;
+}
+
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
 	qperf_inc(q, tasklet_inbound);
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q)
 	if (!qdio_inbound_q_done(q)) {
 		/* means poll time is not yet over */
 		qperf_inc(q, tasklet_inbound_resched);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-			tasklet_schedule(&q->tasklet);
+		if (!qdio_tasklet_schedule(q))
 			return;
-		}
 	}
 
 	qdio_stop_polling(q);
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
 
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q)
 	 * is noticed and outbound_handler is called after some time.
 	 */
 	if (qdio_outbound_q_done(q))
-		del_timer(&q->u.out.timer);
+		del_timer_sync(&q->u.out.timer);
 	else
-		if (!timer_pending(&q->u.out.timer))
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
 			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
 	return;
 
 sched:
-	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-		return;
-	tasklet_schedule(&q->tasklet);
+	qdio_tasklet_schedule(q);
 }
 
 /* outbound tasklet */
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
 
-	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-		return;
-	tasklet_schedule(&q->tasklet);
+	qdio_tasklet_schedule(q);
 }
 
 static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 
 	for_each_output_queue(q->irq_ptr, out, i)
 		if (!qdio_outbound_q_done(out))
-			tasklet_schedule(&out->tasklet);
+			qdio_tasklet_schedule(out);
 }
 
 static void __tiqdio_inbound_processing(struct qdio_q *q)
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-			tasklet_schedule(&q->tasklet);
+		if (!qdio_tasklet_schedule(q))
 			return;
-		}
 	}
 
 	qdio_stop_polling(q);
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
 
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	int i;
 	struct qdio_q *q;
 
-	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
 	for_each_input_queue(irq_ptr, q, i) {
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 			continue;
 		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
 			qdio_siga_sync_q(q);
-		tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
 
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		      struct irb *irb)
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct subchannel_id schid;
 	int cstat, dstat;
 
 	if (!intparm || !irq_ptr) {
-		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
+		ccw_device_get_schid(cdev, &schid);
+		DBF_ERROR("qint:%4x", schid.sch_no);
 		return;
 	}
 
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 int qdio_get_ssqd_desc(struct ccw_device *cdev,
 		       struct qdio_ssqd_desc *data)
 {
+	struct subchannel_id schid;
+
 	if (!cdev || !cdev->private)
 		return -EINVAL;
 
-	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
-	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("get ssqd:%4x", schid.sch_no);
+	return qdio_setup_get_ssqd(NULL, &schid, data);
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
 		tasklet_kill(&q->tasklet);
 
 	for_each_output_queue(irq_ptr, q, i) {
-		del_timer(&q->u.out.timer);
+		del_timer_sync(&q->u.out.timer);
 		tasklet_kill(&q->tasklet);
 	}
 }
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
 int qdio_shutdown(struct ccw_device *cdev, int how)
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct subchannel_id schid;
 	int rc;
-	unsigned long flags;
 
 	if (!irq_ptr)
 		return -ENODEV;
 
 	WARN_ON_ONCE(irqs_disabled());
-	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qshutdown:%4x", schid.sch_no);
 
 	mutex_lock(&irq_ptr->setup_mutex);
 	/*
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 	qdio_shutdown_debug_entries(irq_ptr);
 
 	/* cleanup subchannel */
-	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	spin_lock_irq(get_ccwdev_lock(cdev));
 
 	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
 		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 	}
 
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
 	wait_event_interruptible_timeout(cdev->private->wait_q,
 		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
 		irq_ptr->state == QDIO_IRQ_STATE_ERR,
 		10 * HZ);
-	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	spin_lock_irq(get_ccwdev_lock(cdev));
 
 no_cleanup:
 	qdio_shutdown_thinint(irq_ptr);
@@ -1211,7 +1216,7 @@ no_cleanup:
 	/* restore interrupt handler */
 	if ((void *)cdev->handler == (void *)qdio_int_handler)
 		cdev->handler = irq_ptr->orig_handler;
-	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
 
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	mutex_unlock(&irq_ptr->setup_mutex);
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown);
 int qdio_free(struct ccw_device *cdev)
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct subchannel_id schid;
 
 	if (!irq_ptr)
 		return -ENODEV;
 
-	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qfree:%4x", schid.sch_no);
 	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
 	mutex_lock(&irq_ptr->setup_mutex);
 
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free);
  */
 int qdio_allocate(struct qdio_initialize *init_data)
 {
+	struct subchannel_id schid;
 	struct qdio_irq *irq_ptr;
 
-	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
+	ccw_device_get_schid(init_data->cdev, &schid);
+	DBF_EVENT("qallocate:%4x", schid.sch_no);
 
 	if ((init_data->no_input_qs && !init_data->input_handler) ||
 	    (init_data->no_output_qs && !init_data->output_handler))
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
  */
 int qdio_establish(struct qdio_initialize *init_data)
 {
-	struct qdio_irq *irq_ptr;
 	struct ccw_device *cdev = init_data->cdev;
-	unsigned long saveflags;
+	struct subchannel_id schid;
+	struct qdio_irq *irq_ptr;
 	int rc;
 
-	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qestablish:%4x", schid.sch_no);
 
 	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
-	if (cdev->private->state != DEV_STATE_ONLINE)
-		return -EINVAL;
-
 	mutex_lock(&irq_ptr->setup_mutex);
 	qdio_setup_irq(init_data);
 
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data)
 	irq_ptr->ccw.count = irq_ptr->equeue.count;
 	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
 
-	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+	spin_lock_irq(get_ccwdev_lock(cdev));
 	ccw_device_set_options_mask(cdev, 0);
 
 	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
 	if (rc) {
 		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
 		DBF_ERROR("rc:%4x", rc);
-	}
-	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
-	if (rc) {
 		mutex_unlock(&irq_ptr->setup_mutex);
 		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 		return rc;
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish);
  */
 int qdio_activate(struct ccw_device *cdev)
 {
+	struct subchannel_id schid;
 	struct qdio_irq *irq_ptr;
 	int rc;
-	unsigned long saveflags;
 
-	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qactivate:%4x", schid.sch_no);
 
 	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
-	if (cdev->private->state != DEV_STATE_ONLINE)
-		return -EINVAL;
-
 	mutex_lock(&irq_ptr->setup_mutex);
 	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
 		rc = -EBUSY;
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev)
 	irq_ptr->ccw.count = irq_ptr->aqueue.count;
 	irq_ptr->ccw.cda = 0;
 
-	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+	spin_lock_irq(get_ccwdev_lock(cdev));
 	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
 
 	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
 			      0, DOIO_DENY_PREFETCH);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
 	if (rc) {
 		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
 		DBF_ERROR("rc:%4x", rc);
-	}
-	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
-	if (rc)
 		goto out;
+	}
 
 	if (is_thinint_irq(irq_ptr))
 		tiqdio_add_input_queues(irq_ptr);
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 
 	/* in case of SIGA errors we must process the error immediately */
 	if (used >= q->u.out.scan_threshold || rc)
-		tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
-		if (!timer_pending(&q->u.out.timer))
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
 			mod_timer(&q->u.out.timer, jiffies + HZ);
 	return rc;
 }
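
Illustration, not part of the patch: a minimal user-space C sketch of the guard that the new qdio_tasklet_schedule() helper applies throughout the driver. Queue work is only (re)scheduled while the device is in the ACTIVE state, and callers can use the return value to decide whether to bail out instead of looping. All names below (struct queue, queue_schedule, enum irq_state) are simplified stand-ins, not the kernel API.

/*
 * Illustrative sketch only. It models the scheduling guard introduced by
 * qdio_tasklet_schedule(): work is (re)scheduled only while the device is
 * ACTIVE, and the caller can tell from the return value whether that
 * happened. Simplified names; compile with any C compiler.
 */
#include <stdio.h>
#include <errno.h>

enum irq_state { IRQ_STATE_INACTIVE, IRQ_STATE_ACTIVE, IRQ_STATE_STOPPED };

struct queue {
	enum irq_state state;          /* models q->irq_ptr->state  */
	void (*run)(struct queue *q);  /* models the queue tasklet  */
};

/* Only schedule the queue's work while the device is still active. */
static int queue_schedule(struct queue *q)
{
	if (q->state == IRQ_STATE_ACTIVE) {
		q->run(q);	/* a real tasklet would run later, not inline */
		return 0;
	}
	return -EPERM;
}

static void inbound_work(struct queue *q)
{
	(void)q;
	printf("processing inbound queue\n");
}

int main(void)
{
	struct queue q = { .state = IRQ_STATE_ACTIVE, .run = inbound_work };

	if (!queue_schedule(&q))	/* device active: work is scheduled  */
		printf("rescheduled\n");

	q.state = IRQ_STATE_STOPPED;	/* device is being shut down        */
	if (queue_schedule(&q) == -EPERM)
		printf("not rescheduled: device no longer active\n");

	return 0;
}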