Merge branches 'cma', 'cxgb3', 'cxgb4', 'ehca', 'iser', 'mad', 'nes', 'qib', 'srp' and 'srpt' into for-next

18 changed files with 281 additions and 194 deletions

View File

@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-		       struct iwch_cq *schp, unsigned long *flag)
+		       struct iwch_cq *schp)
 {
 	int count;
 	int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
-	spin_unlock_irqrestore(&qhp->lock, *flag);
+	spin_unlock(&qhp->lock);

 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&rchp->lock, *flag);
+	spin_lock(&rchp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&rchp->lock, *flag);
+	spin_unlock(&rchp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 	}

 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&schp->lock, *flag);
+	spin_lock(&schp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&schp->lock, *flag);
+	spin_unlock(&schp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+		spin_lock(&schp->comp_handler_lock);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+		spin_unlock(&schp->comp_handler_lock);
 	}

 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);

-	spin_lock_irqsave(&qhp->lock, *flag);
+	spin_lock(&qhp->lock);
 }

-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
 	struct iwch_cq *rchp, *schp;
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+			spin_lock(&schp->comp_handler_lock);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+			spin_unlock(&schp->comp_handler_lock);
 		}
 		return;
 	}
-	__flush_qp(qhp, rchp, schp, flag);
+	__flush_qp(qhp, rchp, schp);
 }
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			break;
 		case IWCH_QP_STATE_ERROR:
 			qhp->attr.state = IWCH_QP_STATE_ERROR;
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			break;
 		default:
 			ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		}
 		switch (attrs->next_state) {
 		case IWCH_QP_STATE_IDLE:
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			qhp->attr.state = IWCH_QP_STATE_IDLE;
 			qhp->attr.llp_stream_handle = NULL;
 			put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ err:
 	free=1;
 	wake_up(&qhp->wait);
 	BUG_ON(!ep);
-	flush_qp(qhp, &flag);
+	flush_qp(qhp);
 out:
 	spin_unlock_irqrestore(&qhp->lock, flag);
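
The cxgb3 hunks above stop threading one caller's saved IRQ state (the old unsigned long *flag) through __flush_qp(); the function now uses plain spin_lock()/spin_unlock(), and the caller keeps interrupts disabled across the call. The part worth noting is the lock-ordering dance: __flush_qp() enters with the QP lock held, but the hierarchy is "CQ lock first, then QP lock", so it takes a reference, drops the QP lock, and reacquires both in the correct order. A minimal userspace sketch of that pattern with pthreads (illustrative names, not the driver code):

	#include <pthread.h>

	static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Entered with qp_lock held.  The rule is "cq lock first, then
	 * qp lock", so drop qp_lock and reacquire both in order.  (The
	 * driver also bumps qhp->refcnt here so the QP cannot go away
	 * while its lock is dropped.) */
	static void flush(void)
	{
		pthread_mutex_unlock(&qp_lock);
		pthread_mutex_lock(&cq_lock);	/* outer lock first... */
		pthread_mutex_lock(&qp_lock);	/* ...then inner lock */
		/* flush work happens here */
		pthread_mutex_unlock(&qp_lock);
		pthread_mutex_unlock(&cq_lock);
		pthread_mutex_lock(&qp_lock);	/* caller still expects it held */
	}

	int main(void)
	{
		pthread_mutex_lock(&qp_lock);
		flush();
		pthread_mutex_unlock(&qp_lock);
		return 0;
	}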

View File

@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * generated when moving QP to RTS state.
 	 * A TERM message will be sent after QP has moved to RTS state
 	 */
-	if ((ep->mpa_attr.version == 2) &&
+	if ((ep->mpa_attr.version == 2) && peer2peer &&
 	    (ep->mpa_attr.p2p_type != p2p_type)) {
 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
 		rtr_mismatch = 1;

View File

@@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
 	spin_lock_init(&cct->task_lock);
 	INIT_LIST_HEAD(&cct->cq_list);
 	init_waitqueue_head(&cct->wait_queue);
-	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+	cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
+					   "ehca_comp/%d", cpu);

 	return cct->task;
 }

View File

@@ -112,7 +112,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
+	return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
 }

 static struct ehca_mr *ehca_mr_new(void)
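
1UL << ilog2(x) and rounddown_pow_of_two(x) compute the same value for any x > 0; the new spelling just states the intent directly. A quick userspace check of the equivalence (modeling the kernel's ilog2() with GCC's __builtin_clzl; illustrative, not kernel code):

	#include <assert.h>
	#include <limits.h>

	/* floor(log2(x)) for x > 0, standing in for the kernel's ilog2() */
	static unsigned long ilog2_ul(unsigned long x)
	{
		return (sizeof(x) * CHAR_BIT - 1) - __builtin_clzl(x);
	}

	int main(void)
	{
		/* both spellings round down to the nearest power of two */
		assert((1UL << ilog2_ul(4096)) == 4096);
		assert((1UL << ilog2_ul(5000)) == 4096);
		assert((1UL << ilog2_ul(1)) == 1);
		return 0;
	}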

View File

@@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
 	case IETF_MPA_V2: {
 		u16 ird_size;
 		u16 ord_size;
+		u16 rtr_ctrl_ird;
+		u16 rtr_ctrl_ord;

 		mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
 		mpa_hdr_len += IETF_RTR_MSG_SIZE;
 		cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
 		rtr_msg = &mpa_v2_frame->rtr_msg;

 		/* parse rtr message */
-		rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
-		rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
-		ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
-		ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+		rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+		rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+		ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
+		ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;

-		if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+		if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
 			/* send reset */
 			return -EINVAL;
 		}
@@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
 			}
 		}

-		if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+		if (rtr_ctrl_ord & IETF_RDMA0_READ) {
 			cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-		} else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+		} else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
 			cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
 		} else {	/* Not supported RDMA0 operation */
 			return -EINVAL;
@@ -543,6 +546,8 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 {
 	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
 	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+	u16 ctrl_ird;
+	u16 ctrl_ord;

 	/* initialize the upper 5 bytes of the frame */
 	build_mpa_v1(cm_node, start_addr, mpa_key);
@@ -550,31 +555,31 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);

 	/* initialize RTR msg */
-	rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+	ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
 			    IETF_NO_IRD_ORD : cm_node->ird_size;
-	rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+	ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
 			    IETF_NO_IRD_ORD : cm_node->ord_size;

-	rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
-	rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+	ctrl_ird |= IETF_PEER_TO_PEER;
+	ctrl_ird |= IETF_FLPDU_ZERO_LEN;

 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
-		rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
-		rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+		ctrl_ord |= IETF_RDMA0_WRITE;
+		ctrl_ord |= IETF_RDMA0_READ;
 		break;
 	case MPA_KEY_REPLY:
 		switch (cm_node->send_rdma0_op) {
 		case SEND_RDMA_WRITE_ZERO:
-			rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+			ctrl_ord |= IETF_RDMA0_WRITE;
 			break;
 		case SEND_RDMA_READ_ZERO:
-			rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+			ctrl_ord |= IETF_RDMA0_READ;
 			break;
 		}
 	}
-	rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
-	rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+	rtr_msg->ctrl_ird = htons(ctrl_ird);
+	rtr_msg->ctrl_ord = htons(ctrl_ord);
 }

 /**
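
Both nes hunks fix the same anti-pattern: byte-swapping a wire-format field in place (rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird)) and then treating the same memory as host order later, which corrupts the frame if it is ever re-parsed or transmitted. The fix keeps the frame in network byte order and does all flag tests and assembly in host-order locals. A self-contained sketch of that convention (illustrative names and constant values, not the driver's):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PEER_TO_PEER	0x8000	/* illustrative flag value */
	#define IRD_MASK	0x3fff	/* illustrative field mask */

	struct rtr_msg {
		uint16_t ctrl_ird;	/* network byte order on the wire */
	};

	int main(void)
	{
		struct rtr_msg msg = { .ctrl_ird = htons(PEER_TO_PEER | 5) };

		/* parse: swap into a local; the frame itself stays untouched */
		uint16_t ctrl_ird = ntohs(msg.ctrl_ird);
		printf("p2p=%d ird=%u\n",
		       !!(ctrl_ird & PEER_TO_PEER), ctrl_ird & IRD_MASK);

		/* build: assemble flags in host order, store once with htons() */
		uint16_t out = 7 | PEER_TO_PEER;
		msg.ctrl_ird = htons(out);
		return 0;
	}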

View File

@@ -427,6 +427,14 @@ struct qib_verbs_txreq {
 /* how often we check for packet activity for "power on hours (in seconds) */
 #define ACTIVITY_TIMER 5

 #define MAX_NAME_SIZE 64
+
+struct qib_msix_entry {
+	struct msix_entry msix;
+	void *arg;
+	char name[MAX_NAME_SIZE];
+	cpumask_var_t mask;
+};

 /* Below is an opaque struct. Each chip (device) can maintain
  * private data needed for its operation, but not germane to the
  * rest of the driver. For convenience, we define another that
@@ -1355,7 +1363,7 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
 int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
		    const struct pci_device_id *);
 void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
 int qib_reinit_intr(struct qib_devdata *);
 void qib_enable_intx(struct pci_dev *);
 void qib_nomsi(struct qib_devdata *);

View File

@@ -541,8 +541,7 @@ struct qib_chip_specific {
 	u32 lastbuf_for_pio;
 	u32 stay_in_freeze;
 	u32 recovery_ports_initted;
-	struct msix_entry *msix_entries;
-	void **msix_arg;
+	struct qib_msix_entry *msix_entries;
 	unsigned long *sendchkenable;
 	unsigned long *sendgrhchk;
 	unsigned long *sendibchk;
@@ -639,24 +638,24 @@ static struct {
 	int lsb;
 	int port; /* 0 if not port-specific, else port # */
 } irq_table[] = {
-	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
-	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+	{ "", qib_7322intr, -1, 0 },
+	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0 },
-	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
+	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1 },
-	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
+	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
-	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
-	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
-	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
 };
@@ -2567,9 +2566,13 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
		int i;

		dd->cspec->num_msix_entries = 0;
-		for (i = 0; i < n; i++)
-			free_irq(dd->cspec->msix_entries[i].vector,
-				 dd->cspec->msix_arg[i]);
+		for (i = 0; i < n; i++) {
+			irq_set_affinity_hint(
+				dd->cspec->msix_entries[i].msix.vector, NULL);
+			free_cpumask_var(dd->cspec->msix_entries[i].mask);
+			free_irq(dd->cspec->msix_entries[i].msix.vector,
+				 dd->cspec->msix_entries[i].arg);
+		}
		qib_nomsix(dd);
 	}
 	/* make sure no MSIx interrupts are left pending */
@@ -2597,7 +2600,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
 	kfree(dd->cspec->sendgrhchk);
 	kfree(dd->cspec->sendibchk);
 	kfree(dd->cspec->msix_entries);
-	kfree(dd->cspec->msix_arg);
 	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		u32 mask = QSFP_GPIO_MOD_PRS_N |
@@ -3070,6 +3072,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
 	int ret, i, msixnum;
 	u64 redirect[6];
 	u64 mask;
+	const struct cpumask *local_mask;
+	int firstcpu, secondcpu = 0, currrcvcpu = 0;

 	if (!dd->num_pports)
 		return;
@@ -3118,13 +3122,28 @@ try_intx:
 	memset(redirect, 0, sizeof redirect);
 	mask = ~0ULL;
 	msixnum = 0;
+	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+	firstcpu = cpumask_first(local_mask);
+	if (firstcpu >= nr_cpu_ids ||
+	    cpumask_weight(local_mask) == num_online_cpus()) {
+		local_mask = topology_core_cpumask(0);
+		firstcpu = cpumask_first(local_mask);
+	}
+	if (firstcpu < nr_cpu_ids) {
+		secondcpu = cpumask_next(firstcpu, local_mask);
+		if (secondcpu >= nr_cpu_ids)
+			secondcpu = firstcpu;
+		currrcvcpu = secondcpu;
+	}
 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
-		const char *name;
		void *arg;
		u64 val;
		int lsb, reg, sh;

+		dd->cspec->msix_entries[msixnum].
+			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
+			= '\0';
		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
@@ -3135,7 +3154,11 @@ try_intx:
				arg = dd;
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
-			name = irq_table[i].name;
+			snprintf(dd->cspec->msix_entries[msixnum].name,
+				sizeof(dd->cspec->msix_entries[msixnum].name)
+				 - 1,
+				QIB_DRV_NAME "%d%s", dd->unit,
+				irq_table[i].name);
		} else {
			unsigned ctxt;
@@ -3148,23 +3171,28 @@ try_intx:
				continue;
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
-			name = QIB_DRV_NAME " (kctx)";
+			snprintf(dd->cspec->msix_entries[msixnum].name,
+				sizeof(dd->cspec->msix_entries[msixnum].name)
+				 - 1,
+				QIB_DRV_NAME "%d (kctx)", dd->unit);
		}
-		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
-				  handler, 0, name, arg);
+		ret = request_irq(
+			dd->cspec->msix_entries[msixnum].msix.vector,
+			handler, 0, dd->cspec->msix_entries[msixnum].name,
+			arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd, "Couldn't setup MSIx "
-				"interrupt (vec=%d, irq=%d): %d\n", msixnum,
-				dd->cspec->msix_entries[msixnum].vector,
-				ret);
+				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+				    dd->cspec->msix_entries[msixnum].msix.vector,
+				    ret);
			qib_7322_nomsix(dd);
			goto try_intx;
		}
-		dd->cspec->msix_arg[msixnum] = arg;
+		dd->cspec->msix_entries[msixnum].arg = arg;
		if (lsb >= 0) {
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
@@ -3174,6 +3202,25 @@ try_intx:
		}
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
				      (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		if (firstcpu < nr_cpu_ids &&
+			zalloc_cpumask_var(
+				&dd->cspec->msix_entries[msixnum].mask,
+				GFP_KERNEL)) {
+			if (handler == qib_7322pintr) {
+				cpumask_set_cpu(currrcvcpu,
+					dd->cspec->msix_entries[msixnum].mask);
+				currrcvcpu = cpumask_next(currrcvcpu,
+					local_mask);
+				if (currrcvcpu >= nr_cpu_ids)
+					currrcvcpu = secondcpu;
+			} else {
+				cpumask_set_cpu(firstcpu,
+					dd->cspec->msix_entries[msixnum].mask);
+			}
+			irq_set_affinity_hint(
+				dd->cspec->msix_entries[msixnum].msix.vector,
+				dd->cspec->msix_entries[msixnum].mask);
+		}
		msixnum++;
 	}
 	/* Initialize the vector mapping */
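
The interrupt-setup hunk above adds a CPU-affinity policy: pick the CPUs local to the device's PCI bus (falling back to core 0's siblings), pin every non-receive vector to the first such CPU, and round-robin the receive-context (qib_7322pintr) vectors over the remaining CPUs, wrapping back to the second one. A toy userspace model of just that assignment policy (hypothetical CPU list, not driver code):

	#include <stdio.h>

	int main(void)
	{
		int cpus[] = { 4, 5, 6, 7 };	/* pretend these are local to the bus */
		int ncpu = 4;
		int first = 0;			/* index of "firstcpu" */
		int second = ncpu > 1 ? 1 : 0;	/* index of "secondcpu" */
		int curr = second;		/* "currrcvcpu" */

		for (int vec = 0; vec < 10; vec++) {
			int is_rcv = vec >= 3;	/* pretend vectors 3+ are kctx */
			int cpu = is_rcv ? cpus[curr] : cpus[first];

			printf("vector %d -> cpu %d\n", vec, cpu);
			if (is_rcv && ++curr >= ncpu)
				curr = second;	/* wrap, skipping firstcpu */
		}
		return 0;
	}
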
@@ -3365,7 +3412,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 	if (msix_entries) {
 		/* restore the MSIx vector address and data if saved above */
 		for (i = 0; i < msix_entries; i++) {
-			dd->cspec->msix_entries[i].entry = i;
+			dd->cspec->msix_entries[i].msix.entry = i;
			if (!msix_vecsave || !msix_vecsave[2 * i])
				continue;
			qib_write_kreg(dd, 2 * i +
@@ -6865,15 +6912,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
		tabsize = actual_cnt;
 	dd->cspec->msix_entries = kmalloc(tabsize *
-			sizeof(struct msix_entry), GFP_KERNEL);
-	dd->cspec->msix_arg = kmalloc(tabsize *
-			sizeof(void *), GFP_KERNEL);
-	if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+			sizeof(struct qib_msix_entry), GFP_KERNEL);
+	if (!dd->cspec->msix_entries) {
		qib_dev_err(dd, "No memory for MSIx table\n");
		tabsize = 0;
 	}
 	for (i = 0; i < tabsize; i++)
-		dd->cspec->msix_entries[i].entry = i;
+		dd->cspec->msix_entries[i].msix.entry = i;
 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
View File

@@ -433,7 +433,6 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	struct qib_pportdata *ppd;
 	struct qib_ibport *ibp;
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-	u16 lid;
 	u8 mtu;
 	int ret;
 	u32 state;
@@ -469,8 +468,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		       ibp->mkeyprot == 1))
 		pip->mkey = ibp->mkey;
 	pip->gid_prefix = ibp->gid_prefix;
-	lid = ppd->lid;
-	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+	pip->lid = cpu_to_be16(ppd->lid);
 	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
 	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
 	/* pip->diag_code; */

View File

@@ -194,11 +194,24 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
 }

 static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
-			   struct msix_entry *msix_entry)
+			   struct qib_msix_entry *qib_msix_entry)
 {
 	int ret;
 	u32 tabsize = 0;
 	u16 msix_flags;
+	struct msix_entry *msix_entry;
+	int i;
+
+	/* We can't pass qib_msix_entry array to qib_msix_setup
+	 * so use a dummy msix_entry array and copy the allocated
+	 * irq back to the qib_msix_entry array. */
+	msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL);
+	if (!msix_entry) {
+		ret = -ENOMEM;
+		goto do_intx;
+	}
+	for (i = 0; i < *msixcnt; i++)
+		msix_entry[i] = qib_msix_entry[i].msix;

 	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
 	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
@@ -209,11 +222,15 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
		tabsize = ret;
		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
 	}
+do_intx:
 	if (ret) {
		qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
			    "falling back to INTx\n", tabsize, ret);
		tabsize = 0;
 	}
+	for (i = 0; i < tabsize; i++)
+		qib_msix_entry[i].msix = msix_entry[i];
+	kfree(msix_entry);
 	*msixcnt = tabsize;

 	if (ret)
@@ -251,7 +268,7 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos)
 }

 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
-		    struct msix_entry *entry)
+		    struct qib_msix_entry *entry)
 {
 	u16 linkstat, speed;
 	int pos = 0, pose, ret = 1;
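
The qib_msix_setup() change above is a marshaling idiom: pci_enable_msix() wants a plain struct msix_entry[], but the driver now stores struct qib_msix_entry[] wrappers, so it copies the embedded entries into a temporary array, makes the call, and copies the results back. A generic sketch of the same pattern (stand-in API and illustrative types, not the kernel interfaces):

	#include <stdlib.h>

	struct entry { int vector; };			/* the API's type */
	struct wrapper { struct entry e; void *arg; };	/* what the caller stores */

	/* stand-in for pci_enable_msix(): fills in vectors on success */
	static int api_enable(struct entry *v, int n)
	{
		for (int i = 0; i < n; i++)
			v[i].vector = 100 + i;
		return 0;
	}

	static int setup(struct wrapper *w, int n)
	{
		struct entry *tmp = malloc(n * sizeof(*tmp));
		int ret, i;

		if (!tmp)
			return -1;
		for (i = 0; i < n; i++)
			tmp[i] = w[i].e;	/* marshal in */
		ret = api_enable(tmp, n);
		for (i = 0; i < n; i++)
			w[i].e = tmp[i];	/* copy results back */
		free(tmp);
		return ret;
	}

	int main(void)
	{
		struct wrapper w[3] = { 0 };
		return setup(w, 3);
	}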