Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes from Roland Dreier:

 - AF_IB (native IB addressing) for CMA from Sean Hefty

 - new mlx5 driver for Mellanox Connect-IB adapters (including post
   merge request fixes)

 - SRP fixes from Bart Van Assche (including fix to first merge request)

 - qib HW driver updates

 - resurrection of ocrdma HW driver development

 - uverbs conversion to create fds with O_CLOEXEC set

 - other small changes and fixes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (66 commits)
  mlx5: Return -EFAULT instead of -EPERM
  IB/qib: Log all SDMA errors unconditionally
  IB/qib: Fix module-level leak
  mlx5_core: Adjust hca_cap.uar_page_sz to conform to Connect-IB spec
  IB/srp: Let srp_abort() return FAST_IO_FAIL if TL offline
  IB/uverbs: Use get_unused_fd_flags(O_CLOEXEC) instead of get_unused_fd()
  mlx5_core: Fixes for sparse warnings
  IB/mlx5: Make profile[] static in main.c
  mlx5: Fix parameter type of health_handler_t
  mlx5: Add driver for Mellanox Connect-IB adapters
  IB/core: Add reserved values to enums for low-level driver use
  IB/srp: Bump driver version and release date
  IB/srp: Make HCA completion vector configurable
  IB/srp: Maintain a single connection per I_T nexus
  IB/srp: Fail I/O fast if target offline
  IB/srp: Skip host settle delay
  IB/srp: Avoid skipping srp_reset_host() after a transport error
  IB/srp: Fix remove_one crash due to resource exhaustion
  IB/qib: New transmitter tunning settings for Dell 1.1 backplane
  IB/core: Fix error return code in add_port()
  ...
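One item in the list above is worth unpacking before the qib diffs below: the uverbs O_CLOEXEC conversion. The following is a minimal, hedged sketch of the idiom that change adopts (the my_* names are illustrative, not from this merge): allocating the fd with close-on-exec already set closes the race in which another thread could fork()+exec() between fd allocation and a later fcntl(F_SETFD, FD_CLOEXEC).

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static int my_create_fd(void *priv, const struct file_operations *my_fops)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);	/* was get_unused_fd() */
	struct file *filp;

	if (fd < 0)
		return fd;
	filp = anon_inode_getfile("[my-obj]", my_fops, priv, O_RDWR);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}
	fd_install(fd, filp);	/* the fd only becomes visible to userspace here */
	return fd;
}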
drivers/infiniband/hw/qib/Kconfig
@@ -5,3 +5,11 @@ config INFINIBAND_QIB
 	  This is a low-level driver for Intel PCIe QLE InfiniBand host
 	  channel adapters.  This driver does not support the Intel
 	  HyperTransport card (model QHT7140).
+
+config INFINIBAND_QIB_DCA
+	bool "QIB DCA support"
+	depends on INFINIBAND_QIB && DCA && SMP && GENERIC_HARDIRQS && !(INFINIBAND_QIB=y && DCA=m)
+	default y
+	---help---
+	Setting this enables DCA support on some Intel chip sets
+	with the iba7322 HCA.

drivers/infiniband/hw/qib/Makefile
@@ -13,3 +13,4 @@ ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
 
 ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
 ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
+ib_qib-$(CONFIG_DEBUG_FS) += qib_debugfs.o
drivers/infiniband/hw/qib/qib.h
@@ -1,7 +1,7 @@
 #ifndef _QIB_KERNEL_H
 #define _QIB_KERNEL_H
 /*
- * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -51,6 +51,7 @@
 #include <linux/completion.h>
 #include <linux/kref.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 
 #include "qib_common.h"
 #include "qib_verbs.h"
@@ -114,6 +115,11 @@ struct qib_eep_log_mask {
 /*
  * Below contains all data related to a single context (formerly called port).
  */
+
+#ifdef CONFIG_DEBUG_FS
+struct qib_opcode_stats_perctx;
+#endif
+
 struct qib_ctxtdata {
 	void **rcvegrbuf;
 	dma_addr_t *rcvegrbuf_phys;
@@ -154,6 +160,8 @@ struct qib_ctxtdata {
 	 */
 	/* instead of calculating it */
 	unsigned ctxt;
+	/* local node of context */
+	int node_id;
 	/* non-zero if ctxt is being shared. */
 	u16 subctxt_cnt;
 	/* non-zero if ctxt is being shared. */
@@ -222,12 +230,15 @@ struct qib_ctxtdata {
 	u8 redirect_seq_cnt;
 	/* ctxt rcvhdrq head offset */
 	u32 head;
-	u32 pkt_count;
 	/* lookaside fields */
 	struct qib_qp *lookaside_qp;
 	u32 lookaside_qpn;
 	/* QPs waiting for context processing */
 	struct list_head qp_wait_list;
+#ifdef CONFIG_DEBUG_FS
+	/* verbs stats per CTX */
+	struct qib_opcode_stats_perctx *opstats;
+#endif
 };
 
 struct qib_sge_state;
@@ -428,9 +439,19 @@ struct qib_verbs_txreq {
 #define ACTIVITY_TIMER 5
 
 #define MAX_NAME_SIZE 64
+
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+struct qib_irq_notify;
+#endif
+
 struct qib_msix_entry {
 	struct msix_entry msix;
 	void *arg;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	int dca;
+	int rcv;
+	struct qib_irq_notify *notifier;
+#endif
 	char name[MAX_NAME_SIZE];
 	cpumask_var_t mask;
 };
@@ -828,6 +849,9 @@ struct qib_devdata {
 				     struct qib_ctxtdata *);
 	void (*f_writescratch)(struct qib_devdata *, u32);
 	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
+#endif
 
 	char *boardname; /* human readable board info */
 
@@ -1075,6 +1099,10 @@ struct qib_devdata {
 	u16 psxmitwait_check_rate;
 	/* high volume overflow errors defered to tasklet */
 	struct tasklet_struct error_tasklet;
+	/* per device cq worker */
+	struct kthread_worker *worker;
+
+	int assigned_node_id; /* NUMA node closest to HCA */
 };
 
 /* hol_state values */
@@ -1154,7 +1182,7 @@ int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
 int qib_setup_eagerbufs(struct qib_ctxtdata *);
 void qib_set_ctxtcnt(struct qib_devdata *);
 int qib_create_ctxts(struct qib_devdata *dd);
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
 void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
 void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
 
@@ -1320,7 +1348,7 @@ static inline int __qib_sdma_running(struct qib_pportdata *ppd)
 	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
 }
 int qib_sdma_running(struct qib_pportdata *);
-
+void dump_sdma_state(struct qib_pportdata *ppd);
 void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
 void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
 
@@ -1445,6 +1473,7 @@ extern unsigned qib_n_krcv_queues;
 extern unsigned qib_sdma_fetch_arb;
 extern unsigned qib_compat_ddr_negotiate;
 extern int qib_special_trigger;
+extern unsigned qib_numa_aware;
 
 extern struct mutex qib_mutex;
 
@@ -1474,27 +1503,23 @@ extern struct mutex qib_mutex;
  * first to avoid possible serial port delays from printk.
  */
 #define qib_early_err(dev, fmt, ...) \
-	do { \
-		dev_err(dev, fmt, ##__VA_ARGS__); \
-	} while (0)
+	dev_err(dev, fmt, ##__VA_ARGS__)
 
 #define qib_dev_err(dd, fmt, ...) \
-	do { \
-		dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
-			qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
-	} while (0)
+	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+
+#define qib_dev_warn(dd, fmt, ...) \
+	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
 
 #define qib_dev_porterr(dd, port, fmt, ...) \
-	do { \
-		dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
-			qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
-			##__VA_ARGS__); \
-	} while (0)
+	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+		qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+		##__VA_ARGS__)
 
 #define qib_devinfo(pcidev, fmt, ...) \
-	do { \
-		dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
-	} while (0)
+	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
 
 /*
  * this is used for formatting hw error messages...
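The macro hunk above can drop the do { } while (0) wrappers because each body is now a single expression statement. As a reminder of why the wrapper exists at all — a generic sketch, with prepare() purely hypothetical:

/* prepare() is hypothetical, for illustration only */
#define BAD_LOG(dev, fmt, ...) \
	prepare(dev); \
	dev_err(dev, fmt, ##__VA_ARGS__)	/* expands to TWO statements */

/*
 * if (err)
 *	BAD_LOG(dev, "oops\n");   -- only prepare() is guarded by the if,
 *				     and a following else no longer parses
 *
 * The classic fix, needed only for multi-statement bodies:
 */
#define GOOD_LOG(dev, fmt, ...) \
	do { \
		prepare(dev); \
		dev_err(dev, fmt, ##__VA_ARGS__); \
	} while (0)	/* behaves as exactly one statement */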
drivers/infiniband/hw/qib/qib_common.h
@@ -279,7 +279,7 @@ struct qib_base_info {
  * may not be implemented; the user code must deal with this if it
  * cares, or it must abort after initialization reports the difference.
  */
-#define QIB_USER_SWMINOR 11
+#define QIB_USER_SWMINOR 12
 
 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
 
drivers/infiniband/hw/qib/qib_cq.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -34,8 +35,10 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/kthread.h>
 
 #include "qib_verbs.h"
+#include "qib.h"
 
 /**
  * qib_cq_enter - add a new entry to the completion queue
@@ -102,13 +105,18 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
 	if (cq->notify == IB_CQ_NEXT_COMP ||
 	    (cq->notify == IB_CQ_SOLICITED &&
 	     (solicited || entry->status != IB_WC_SUCCESS))) {
-		cq->notify = IB_CQ_NONE;
-		cq->triggered++;
+		struct kthread_worker *worker;
 		/*
 		 * This will cause send_complete() to be called in
 		 * another thread.
 		 */
-		queue_work(qib_cq_wq, &cq->comptask);
+		smp_rmb();
+		worker = cq->dd->worker;
+		if (likely(worker)) {
+			cq->notify = IB_CQ_NONE;
+			cq->triggered++;
+			queue_kthread_work(worker, &cq->comptask);
+		}
 	}
 
 	spin_unlock_irqrestore(&cq->lock, flags);
@@ -163,7 +171,7 @@ bail:
 	return npolled;
 }
 
-static void send_complete(struct work_struct *work)
+static void send_complete(struct kthread_work *work)
 {
 	struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
 
@@ -287,11 +295,12 @@ struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
 	 * The number of entries should be >= the number requested or return
 	 * an error.
 	 */
+	cq->dd = dd_from_dev(dev);
 	cq->ibcq.cqe = entries;
 	cq->notify = IB_CQ_NONE;
 	cq->triggered = 0;
 	spin_lock_init(&cq->lock);
-	INIT_WORK(&cq->comptask, send_complete);
+	init_kthread_work(&cq->comptask, send_complete);
 	wc->head = 0;
 	wc->tail = 0;
 	cq->queue = wc;
@@ -323,7 +332,7 @@ int qib_destroy_cq(struct ib_cq *ibcq)
 	struct qib_ibdev *dev = to_idev(ibcq->device);
 	struct qib_cq *cq = to_icq(ibcq);
 
-	flush_work(&cq->comptask);
+	flush_kthread_work(&cq->comptask);
 	spin_lock(&dev->n_cqs_lock);
 	dev->n_cqs_allocated--;
 	spin_unlock(&dev->n_cqs_lock);
@@ -483,3 +492,49 @@ bail_free:
 bail:
 	return ret;
 }
+
+int qib_cq_init(struct qib_devdata *dd)
+{
+	int ret = 0;
+	int cpu;
+	struct task_struct *task;
+
+	if (dd->worker)
+		return 0;
+	dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
+	if (!dd->worker)
+		return -ENOMEM;
+	init_kthread_worker(dd->worker);
+	task = kthread_create_on_node(
+		kthread_worker_fn,
+		dd->worker,
+		dd->assigned_node_id,
+		"qib_cq%d", dd->unit);
+	if (IS_ERR(task))
+		goto task_fail;
+	cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
+	kthread_bind(task, cpu);
+	wake_up_process(task);
+out:
+	return ret;
+task_fail:
+	ret = PTR_ERR(task);
+	kfree(dd->worker);
+	dd->worker = NULL;
+	goto out;
+}
+
+void qib_cq_exit(struct qib_devdata *dd)
+{
+	struct kthread_worker *worker;
+
+	worker = dd->worker;
+	if (!worker)
+		return;
+	/* blocks future queuing from send_complete() */
+	dd->worker = NULL;
+	smp_wmb();
+	flush_kthread_worker(worker);
+	kthread_stop(worker->task);
+	kfree(worker);
+}
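The qib_cq.c changes above replace the shared workqueue with a per-device kthread_worker created near the HCA's NUMA node. A minimal stand-alone sketch of that pattern, using the 3.10-era API names (init_kthread_worker() and friends; later kernels renamed these to kthread_init_worker() etc.) — all my_* names are illustrative:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_ctx {
	struct kthread_worker worker;
	struct task_struct *task;
	struct kthread_work work;
};

static void my_work_fn(struct kthread_work *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	/* runs in the dedicated kthread, in process context */
	(void)ctx;
}

static int my_ctx_start(struct my_ctx *ctx, int node)
{
	init_kthread_worker(&ctx->worker);
	init_kthread_work(&ctx->work, my_work_fn);
	/* like qib_cq_init(): create the thread on a chosen NUMA node */
	ctx->task = kthread_create_on_node(kthread_worker_fn, &ctx->worker,
					   node, "my_worker");
	if (IS_ERR(ctx->task))
		return PTR_ERR(ctx->task);
	wake_up_process(ctx->task);
	return 0;
}

static void my_ctx_kick(struct my_ctx *ctx)
{
	queue_kthread_work(&ctx->worker, &ctx->work);	/* no-op if already queued */
}

static void my_ctx_stop(struct my_ctx *ctx)
{
	flush_kthread_worker(&ctx->worker);	/* drain pending work */
	kthread_stop(ctx->task);	/* makes kthread_worker_fn() return */
}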
drivers/infiniband/hw/qib/qib_debugfs.c (new file, 283 lines)
@@ -0,0 +1,283 @@
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Copyright (c) 2013 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+
+#include "qib.h"
+#include "qib_verbs.h"
+#include "qib_debugfs.h"
+
+static struct dentry *qib_dbg_root;
+
+#define DEBUGFS_FILE(name) \
+static const struct seq_operations _##name##_seq_ops = { \
+	.start = _##name##_seq_start, \
+	.next  = _##name##_seq_next, \
+	.stop  = _##name##_seq_stop, \
+	.show  = _##name##_seq_show \
+}; \
+static int _##name##_open(struct inode *inode, struct file *s) \
+{ \
+	struct seq_file *seq; \
+	int ret; \
+	ret = seq_open(s, &_##name##_seq_ops); \
+	if (ret) \
+		return ret; \
+	seq = s->private_data; \
+	seq->private = inode->i_private; \
+	return 0; \
+} \
+static const struct file_operations _##name##_file_ops = { \
+	.owner   = THIS_MODULE, \
+	.open    = _##name##_open, \
+	.read    = seq_read, \
+	.llseek  = seq_lseek, \
+	.release = seq_release \
+};
+
+#define DEBUGFS_FILE_CREATE(name) \
+do { \
+	struct dentry *ent; \
+	ent = debugfs_create_file(#name , 0400, ibd->qib_ibdev_dbg, \
+		ibd, &_##name##_file_ops); \
+	if (!ent) \
+		pr_warn("create of " #name " failed\n"); \
+} while (0)
+
+static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct qib_opcode_stats_perctx *opstats;
+
+	if (*pos >= ARRAY_SIZE(opstats->stats))
+		return NULL;
+	return pos;
+}
+
+static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct qib_opcode_stats_perctx *opstats;
+
+	++*pos;
+	if (*pos >= ARRAY_SIZE(opstats->stats))
+		return NULL;
+	return pos;
+}
+
+
+static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
+{
+	/* nothing allocated */
+}
+
+static int _opcode_stats_seq_show(struct seq_file *s, void *v)
+{
+	loff_t *spos = v;
+	loff_t i = *spos, j;
+	u64 n_packets = 0, n_bytes = 0;
+	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
+	struct qib_devdata *dd = dd_from_dev(ibd);
+
+	for (j = 0; j < dd->first_user_ctxt; j++) {
+		if (!dd->rcd[j])
+			continue;
+		n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
+		n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
+	}
+	if (!n_packets && !n_bytes)
+		return SEQ_SKIP;
+	seq_printf(s, "%02llx %llu/%llu\n", i,
+		(unsigned long long) n_packets,
+		(unsigned long long) n_bytes);
+
+	return 0;
+}
+
+DEBUGFS_FILE(opcode_stats)
+
+static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
+	struct qib_devdata *dd = dd_from_dev(ibd);
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+	if (*pos >= dd->first_user_ctxt)
+		return NULL;
+	return pos;
+}
+
+static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
+	struct qib_devdata *dd = dd_from_dev(ibd);
+
+	if (v == SEQ_START_TOKEN)
+		return pos;
+
+	++*pos;
+	if (*pos >= dd->first_user_ctxt)
+		return NULL;
+	return pos;
+}
+
+static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
+{
+	/* nothing allocated */
+}
+
+static int _ctx_stats_seq_show(struct seq_file *s, void *v)
+{
+	loff_t *spos;
+	loff_t i, j;
+	u64 n_packets = 0;
+	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
+	struct qib_devdata *dd = dd_from_dev(ibd);
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(s, "Ctx:npkts\n");
+		return 0;
+	}
+
+	spos = v;
+	i = *spos;
+
+	if (!dd->rcd[i])
+		return SEQ_SKIP;
+
+	for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
+		n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
+
+	if (!n_packets)
+		return SEQ_SKIP;
+
+	seq_printf(s, " %llu:%llu\n", i, n_packets);
+	return 0;
+}
+
+DEBUGFS_FILE(ctx_stats)
+
+static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct qib_qp_iter *iter;
+	loff_t n = *pos;
+
+	iter = qib_qp_iter_init(s->private);
+	if (!iter)
+		return NULL;
+
+	while (n--) {
+		if (qib_qp_iter_next(iter)) {
+			kfree(iter);
+			return NULL;
+		}
+	}
+
+	return iter;
+}
+
+static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+	loff_t *pos)
+{
+	struct qib_qp_iter *iter = iter_ptr;
+
+	(*pos)++;
+
+	if (qib_qp_iter_next(iter)) {
+		kfree(iter);
+		return NULL;
+	}
+
+	return iter;
+}
+
+static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+{
+	/* nothing for now */
+}
+
+static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
+{
+	struct qib_qp_iter *iter = iter_ptr;
+
+	if (!iter)
+		return 0;
+
+	qib_qp_iter_print(s, iter);
+
+	return 0;
+}
+
+DEBUGFS_FILE(qp_stats)
+
+void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
+{
+	char name[10];
+
+	snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit);
+	ibd->qib_ibdev_dbg = debugfs_create_dir(name, qib_dbg_root);
+	if (!ibd->qib_ibdev_dbg) {
+		pr_warn("create of %s failed\n", name);
+		return;
+	}
+	DEBUGFS_FILE_CREATE(opcode_stats);
+	DEBUGFS_FILE_CREATE(ctx_stats);
+	DEBUGFS_FILE_CREATE(qp_stats);
+	return;
+}
+
+void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
+{
+	if (!qib_dbg_root)
+		goto out;
+	debugfs_remove_recursive(ibd->qib_ibdev_dbg);
+out:
+	ibd->qib_ibdev_dbg = NULL;
+}
+
+void qib_dbg_init(void)
+{
+	qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL);
+	if (!qib_dbg_root)
+		pr_warn("init of debugfs failed\n");
+}
+
+void qib_dbg_exit(void)
+{
+	debugfs_remove_recursive(qib_dbg_root);
+	qib_dbg_root = NULL;
+}
+
+#endif
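The DEBUGFS_FILE() macro above is boilerplate around the standard seq_file iterator interface. A self-contained sketch of the same pattern without the macro, iterating a fixed array — the my_* names and the array are illustrative:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static const int my_vals[] = { 3, 1, 4, 1, 5 };

static void *my_seq_start(struct seq_file *s, loff_t *pos)
{
	return (*pos < ARRAY_SIZE(my_vals)) ? pos : NULL;
}

static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return (*pos < ARRAY_SIZE(my_vals)) ? pos : NULL;
}

static void my_seq_stop(struct seq_file *s, void *v)
{
	/* nothing allocated in start, nothing to free */
}

static int my_seq_show(struct seq_file *s, void *v)
{
	loff_t i = *(loff_t *)v;

	seq_printf(s, "%lld: %d\n", i, my_vals[i]);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start = my_seq_start,
	.next  = my_seq_next,
	.stop  = my_seq_stop,
	.show  = my_seq_show,
};

static int my_open(struct inode *inode, struct file *f)
{
	return seq_open(f, &my_seq_ops);
}

static const struct file_operations my_fops = {
	.owner   = THIS_MODULE,
	.open    = my_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/* then: debugfs_create_file("my_stats", 0400, parent_dentry, NULL, &my_fops); */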
drivers/infiniband/hw/qib/qib_debugfs.h (new file, 45 lines)
@@ -0,0 +1,45 @@
+#ifndef _QIB_DEBUGFS_H
+#define _QIB_DEBUGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Copyright (c) 2013 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+struct qib_ibdev;
+void qib_dbg_ibdev_init(struct qib_ibdev *ibd);
+void qib_dbg_ibdev_exit(struct qib_ibdev *ibd);
+
+void qib_dbg_init(void);
+void qib_dbg_exit(void);
+
+#endif
+
+#endif /* _QIB_DEBUGFS_H */
drivers/infiniband/hw/qib/qib_driver.c
@@ -558,7 +558,6 @@ move_along:
 	}
 
 	rcd->head = l;
-	rcd->pkt_count += i;
 
 	/*
 	 * Iterate over all QPs waiting to respond.
drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -1155,6 +1155,49 @@ static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
 	return pollflag;
 }
 
+static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
+{
+	struct qib_filedata *fd = fp->private_data;
+	const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+	int local_cpu;
+
+	/*
+	 * If process has NOT already set it's affinity, select and
+	 * reserve a processor for it on the local NUMA node.
+	 */
+	if ((weight >= qib_cpulist_count) &&
+		(cpumask_weight(local_mask) <= qib_cpulist_count)) {
+		for_each_cpu(local_cpu, local_mask)
+			if (!test_and_set_bit(local_cpu, qib_cpulist)) {
+				fd->rec_cpu_num = local_cpu;
+				return;
+			}
+	}
+
+	/*
+	 * If process has NOT already set it's affinity, select and
+	 * reserve a processor for it, as a rendevous for all
+	 * users of the driver.  If they don't actually later
+	 * set affinity to this cpu, or set it to some other cpu,
+	 * it just means that sooner or later we don't recommend
+	 * a cpu, and let the scheduler do it's best.
+	 */
+	if (weight >= qib_cpulist_count) {
+		int cpu;
+		cpu = find_first_zero_bit(qib_cpulist,
+			qib_cpulist_count);
+		if (cpu == qib_cpulist_count)
+			qib_dev_err(dd,
+			"no cpus avail for affinity PID %u\n",
+			current->pid);
+		else {
+			__set_bit(cpu, qib_cpulist);
+			fd->rec_cpu_num = cpu;
+		}
+	}
+}
+
 /*
  * Check that userland and driver are compatible for subcontexts.
  */
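assign_ctxt_affinity() above reserves a "recommended" CPU by atomically claiming a bit in the driver-global qib_cpulist bitmap. The reservation idiom in isolation, as a hedged sketch (my_* names stand in for qib_cpulist/qib_cpulist_count):

#include <linux/bitmap.h>
#include <linux/bitops.h>

static unsigned long *my_cpulist;	/* allocated elsewhere, one bit per CPU */
static unsigned int my_cpulist_count;

static int my_reserve_cpu(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < my_cpulist_count; cpu++)
		if (!test_and_set_bit(cpu, my_cpulist))
			return cpu;	/* claimed atomically, no lock needed */
	return -1;			/* all recommended CPUs already taken */
}

static void my_release_cpu(int cpu)
{
	if (cpu >= 0)
		__clear_bit(cpu, my_cpulist);
}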
@@ -1259,12 +1302,20 @@ bail:
 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
 		      struct file *fp, const struct qib_user_info *uinfo)
 {
+	struct qib_filedata *fd = fp->private_data;
 	struct qib_devdata *dd = ppd->dd;
 	struct qib_ctxtdata *rcd;
 	void *ptmp = NULL;
 	int ret;
+	int numa_id;
 
-	rcd = qib_create_ctxtdata(ppd, ctxt);
+	assign_ctxt_affinity(fp, dd);
+
+	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
+		cpu_to_node(fd->rec_cpu_num) :
+		numa_node_id()) : dd->assigned_node_id;
+
+	rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
 
 	/*
 	 * Allocate memory for use in qib_tid_update() at open to
@@ -1296,6 +1347,9 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
 	goto bail;
 
 bailerr:
+	if (fd->rec_cpu_num != -1)
+		__clear_bit(fd->rec_cpu_num, qib_cpulist);
+
 	dd->rcd[ctxt] = NULL;
 	kfree(rcd);
 	kfree(ptmp);
@@ -1485,6 +1539,57 @@ static int qib_open(struct inode *in, struct file *fp)
 	return fp->private_data ? 0 : -ENOMEM;
 }
 
+static int find_hca(unsigned int cpu, int *unit)
+{
+	int ret = 0, devmax, npresent, nup, ndev;
+
+	*unit = -1;
+
+	devmax = qib_count_units(&npresent, &nup);
+	if (!npresent) {
+		ret = -ENXIO;
+		goto done;
+	}
+	if (!nup) {
+		ret = -ENETDOWN;
+		goto done;
+	}
+	for (ndev = 0; ndev < devmax; ndev++) {
+		struct qib_devdata *dd = qib_lookup(ndev);
+		if (dd) {
+			if (pcibus_to_node(dd->pcidev->bus) < 0) {
+				ret = -EINVAL;
+				goto done;
+			}
+			if (cpu_to_node(cpu) ==
+				pcibus_to_node(dd->pcidev->bus)) {
+				*unit = ndev;
+				goto done;
+			}
+		}
+	}
+done:
+	return ret;
+}
+
+static int do_qib_user_sdma_queue_create(struct file *fp)
+{
+	struct qib_filedata *fd = fp->private_data;
+	struct qib_ctxtdata *rcd = fd->rcd;
+	struct qib_devdata *dd = rcd->dd;
+
+	if (dd->flags & QIB_HAS_SEND_DMA) {
+
+		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
+						    dd->unit,
+						    rcd->ctxt,
+						    fd->subctxt);
+		if (!fd->pq)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
 /*
  * Get ctxt early, so can set affinity prior to memory allocation.
  */
@@ -1517,61 +1622,36 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 	if (qib_compatible_subctxts(swmajor, swminor) &&
 	    uinfo->spu_subctxt_cnt) {
 		ret = find_shared_ctxt(fp, uinfo);
-		if (ret) {
-			if (ret > 0)
-				ret = 0;
-			goto done_chk_sdma;
+		if (ret > 0) {
+			ret = do_qib_user_sdma_queue_create(fp);
+			if (!ret)
+				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
+			goto done_ok;
 		}
 	}
 
 	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
 	if (i_minor)
 		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
-	else
+	else {
+		int unit;
+		const unsigned int cpu = cpumask_first(&current->cpus_allowed);
+		const unsigned int weight =
+			cpumask_weight(&current->cpus_allowed);
+
+		if (weight == 1 && !test_bit(cpu, qib_cpulist))
+			if (!find_hca(cpu, &unit) && unit >= 0)
+				if (!find_free_ctxt(unit, fp, uinfo)) {
+					ret = 0;
+					goto done_chk_sdma;
+				}
 		ret = get_a_ctxt(fp, uinfo, alg);
-
-done_chk_sdma:
-	if (!ret) {
-		struct qib_filedata *fd = fp->private_data;
-		const struct qib_ctxtdata *rcd = fd->rcd;
-		const struct qib_devdata *dd = rcd->dd;
-		unsigned int weight;
-
-		if (dd->flags & QIB_HAS_SEND_DMA) {
-			fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
-							    dd->unit,
-							    rcd->ctxt,
-							    fd->subctxt);
-			if (!fd->pq)
-				ret = -ENOMEM;
-		}
-
-		/*
-		 * If process has NOT already set it's affinity, select and
-		 * reserve a processor for it, as a rendezvous for all
-		 * users of the driver.  If they don't actually later
-		 * set affinity to this cpu, or set it to some other cpu,
-		 * it just means that sooner or later we don't recommend
-		 * a cpu, and let the scheduler do it's best.
-		 */
-		weight = cpumask_weight(tsk_cpus_allowed(current));
-		if (!ret && weight >= qib_cpulist_count) {
-			int cpu;
-			cpu = find_first_zero_bit(qib_cpulist,
-				qib_cpulist_count);
-			if (cpu != qib_cpulist_count) {
-				__set_bit(cpu, qib_cpulist);
-				fd->rec_cpu_num = cpu;
-			}
-		} else if (weight == 1 &&
-			test_bit(cpumask_first(tsk_cpus_allowed(current)),
-				 qib_cpulist))
-			qib_devinfo(dd->pcidev,
-				"%s PID %u affinity set to cpu %d; already allocated\n",
-				current->comm, current->pid,
-				cpumask_first(tsk_cpus_allowed(current)));
 	}
 
+done_chk_sdma:
+	if (!ret)
+		ret = do_qib_user_sdma_queue_create(fp);
+
+done_ok:
 	mutex_unlock(&qib_mutex);
 
 done:
drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3464,6 +3464,13 @@ static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
 	return -ENXIO;
 }
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
+{
+	return 0;
+}
+#endif
+
 /* Dummy function, as 6120 boards never disable EEPROM Write */
 static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
 {
@@ -3539,6 +3546,9 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
 	dd->f_xgxs_reset = qib_6120_xgxs_reset;
 	dd->f_writescratch = writescratch;
 	dd->f_tempsense_rd = qib_6120_tempsense_rd;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	dd->f_notify_dca = qib_6120_notify_dca;
+#endif
 	/*
 	 * Do remaining pcie setup and save pcie values in dd.
 	 * Any error printing is already done by the init code.

drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4513,6 +4513,13 @@ bail:
 	return ret;
 }
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
+{
+	return 0;
+}
+#endif
+
 /* Dummy function, as 7220 boards never disable EEPROM Write */
 static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
 {
@@ -4587,6 +4594,9 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
 	dd->f_xgxs_reset = qib_7220_xgxs_reset;
 	dd->f_writescratch = writescratch;
 	dd->f_tempsense_rd = qib_7220_tempsense_rd;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	dd->f_notify_dca = qib_7220_notify_dca;
+#endif
 	/*
 	 * Do remaining pcie setup and save pcie values in dd.
 	 * Any error printing is already done by the init code.
drivers/infiniband/hw/qib/qib_iba7322.c
@@ -44,6 +44,9 @@
 #include <linux/module.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_smi.h>
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+#include <linux/dca.h>
+#endif
 
 #include "qib.h"
 #include "qib_7322_regs.h"
@@ -80,6 +83,7 @@ static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
 static void serdes_7322_los_enable(struct qib_pportdata *, int);
 static int serdes_7322_init_old(struct qib_pportdata *);
 static int serdes_7322_init_new(struct qib_pportdata *);
+static void dump_sdma_7322_state(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -519,6 +523,14 @@ static const u8 qib_7322_physportstate[0x20] = {
 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
 };
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+struct qib_irq_notify {
+	int rcv;
+	void *arg;
+	struct irq_affinity_notify notify;
+};
+#endif
+
 struct qib_chip_specific {
 	u64 __iomem *cregbase;
 	u64 *cntrs;
@@ -546,6 +558,12 @@ struct qib_chip_specific {
 	u32 lastbuf_for_pio;
 	u32 stay_in_freeze;
 	u32 recovery_ports_initted;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	u32 dca_ctrl;
+	int rhdr_cpu[18];
+	int sdma_cpu[2];
+	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
+#endif
 	struct qib_msix_entry *msix_entries;
 	unsigned long *sendchkenable;
 	unsigned long *sendgrhchk;
@@ -573,7 +591,7 @@ struct vendor_txdds_ent {
 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
@@ -635,6 +653,7 @@ struct qib_chippport_specific {
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+	char sdmamsgbuf[192]; /* for per-port sdma error messages */
 };
 
 static struct {
@@ -642,28 +661,76 @@ static struct {
 	irq_handler_t handler;
 	int lsb;
 	int port; /* 0 if not port-specific, else port # */
+	int dca;
 } irq_table[] = {
-	{ "", qib_7322intr, -1, 0 },
+	{ "", qib_7322intr, -1, 0, 0 },
 	{ " (buf avail)", qib_7322bufavail,
-		SYM_LSB(IntStatus, SendBufAvail), 0 },
+		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
 	{ " (sdma 0)", sdma_intr,
-		SYM_LSB(IntStatus, SDmaInt_0), 1 },
+		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
 	{ " (sdma 1)", sdma_intr,
-		SYM_LSB(IntStatus, SDmaInt_1), 2 },
+		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
 	{ " (sdmaI 0)", sdma_idle_intr,
-		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
+		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
 	{ " (sdmaI 1)", sdma_idle_intr,
-		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
+		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
 	{ " (sdmaP 0)", sdma_progress_intr,
-		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
+		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
 	{ " (sdmaP 1)", sdma_progress_intr,
-		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
+		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
 	{ " (sdmaC 0)", sdma_cleanup_intr,
-		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
+		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
 	{ " (sdmaC 1)", sdma_cleanup_intr,
-		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
+		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
 };
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+
+static const struct dca_reg_map {
+	int shadow_inx;
+	int lsb;
+	u64 mask;
+	u16 regno;
+} dca_rcvhdr_reg_map[] = {
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
+	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
+	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
+	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
+};
+#endif
+
 /* ibcctrl bits */
 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
 /* cycle through TS1/TS2 till OK */
@@ -686,6 +753,13 @@ static void write_7322_init_portregs(struct qib_pportdata *);
 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
 static void check_7322_rxe_status(struct qib_pportdata *);
 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+static void qib_setup_dca(struct qib_devdata *dd);
+static void setup_dca_notifier(struct qib_devdata *dd,
+	struct qib_msix_entry *m);
+static void reset_dca_notifier(struct qib_devdata *dd,
+	struct qib_msix_entry *m);
+#endif
 
 /**
  * qib_read_ureg32 - read 32-bit virtualized per-context register
@@ -1529,6 +1603,15 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
 
 	spin_lock_irqsave(&ppd->sdma_lock, flags);
 
+	if (errs != QIB_E_P_SDMAHALT) {
+		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
+		qib_dev_porterr(dd, ppd->port,
+			"SDMA %s 0x%016llx %s\n",
+			qib_sdma_state_names[ppd->sdma_state.current_state],
+			errs, ppd->cpspec->sdmamsgbuf);
+		dump_sdma_7322_state(ppd);
+	}
+
 	switch (ppd->sdma_state.current_state) {
 	case qib_sdma_state_s00_hw_down:
 		break;
@@ -2084,6 +2167,29 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
 
 	qib_dev_err(dd, "%s hardware error\n", msg);
 
+	if (hwerrs &
+		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
+		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
+		int pidx = 0;
+		int err;
+		unsigned long flags;
+		struct qib_pportdata *ppd = dd->pport;
+		for (; pidx < dd->num_pports; ++pidx, ppd++) {
+			err = 0;
+			if (pidx == 0 && (hwerrs &
+				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
+				err++;
+			if (pidx == 1 && (hwerrs &
+				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
+				err++;
+			if (err) {
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				dump_sdma_7322_state(ppd);
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	if (isfatal && !dd->diag_client) {
 		qib_dev_err(dd,
 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
@@ -2558,6 +2664,162 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
 	qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
 }
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+
+static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
+{
+	switch (event) {
+	case DCA_PROVIDER_ADD:
+		if (dd->flags & QIB_DCA_ENABLED)
+			break;
+		if (!dca_add_requester(&dd->pcidev->dev)) {
+			qib_devinfo(dd->pcidev, "DCA enabled\n");
+			dd->flags |= QIB_DCA_ENABLED;
+			qib_setup_dca(dd);
+		}
+		break;
+	case DCA_PROVIDER_REMOVE:
+		if (dd->flags & QIB_DCA_ENABLED) {
+			dca_remove_requester(&dd->pcidev->dev);
+			dd->flags &= ~QIB_DCA_ENABLED;
+			dd->cspec->dca_ctrl = 0;
+			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
+				dd->cspec->dca_ctrl);
+		}
+		break;
+	}
+	return 0;
+}
+
+static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
+{
+	struct qib_devdata *dd = rcd->dd;
+	struct qib_chip_specific *cspec = dd->cspec;
+
+	if (!(dd->flags & QIB_DCA_ENABLED))
+		return;
+	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
+		const struct dca_reg_map *rmp;
+
+		cspec->rhdr_cpu[rcd->ctxt] = cpu;
+		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
+		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
+		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
+			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
+		qib_devinfo(dd->pcidev,
+			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
+			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
+		qib_write_kreg(dd, rmp->regno,
+			cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
+		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
+		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
+	}
+}
+
+static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct qib_chip_specific *cspec = dd->cspec;
+	unsigned pidx = ppd->port - 1;
+
+	if (!(dd->flags & QIB_DCA_ENABLED))
+		return;
+	if (cspec->sdma_cpu[pidx] != cpu) {
+		cspec->sdma_cpu[pidx] = cpu;
+		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
+			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
+			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
+		cspec->dca_rcvhdr_ctrl[4] |=
+			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
+				(ppd->hw_pidx ?
+					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
+					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
+		qib_devinfo(dd->pcidev,
+			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
+			(long long) cspec->dca_rcvhdr_ctrl[4]);
+		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
+			cspec->dca_rcvhdr_ctrl[4]);
+		cspec->dca_ctrl |= ppd->hw_pidx ?
+			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
+			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
+		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
+	}
+}
+
+static void qib_setup_dca(struct qib_devdata *dd)
+{
+	struct qib_chip_specific *cspec = dd->cspec;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
+		cspec->rhdr_cpu[i] = -1;
+	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
+		cspec->sdma_cpu[i] = -1;
+	cspec->dca_rcvhdr_ctrl[0] =
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[1] =
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[2] =
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[3] =
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[4] =
+		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
+	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
+		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
+			cspec->dca_rcvhdr_ctrl[i]);
+	for (i = 0; i < cspec->num_msix_entries; i++)
+		setup_dca_notifier(dd, &cspec->msix_entries[i]);
+}
+
+static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
+	const cpumask_t *mask)
+{
+	struct qib_irq_notify *n =
+		container_of(notify, struct qib_irq_notify, notify);
+	int cpu = cpumask_first(mask);
+
+	if (n->rcv) {
+		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+		qib_update_rhdrq_dca(rcd, cpu);
+	} else {
+		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+		qib_update_sdma_dca(ppd, cpu);
+	}
+}
+
+static void qib_irq_notifier_release(struct kref *ref)
+{
+	struct qib_irq_notify *n =
+		container_of(ref, struct qib_irq_notify, notify.kref);
+	struct qib_devdata *dd;
+
+	if (n->rcv) {
+		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+		dd = rcd->dd;
+	} else {
+		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+		dd = ppd->dd;
+	}
+	qib_devinfo(dd->pcidev,
+		"release on HCA notify 0x%p n 0x%p\n", ref, n);
+	kfree(n);
+}
+#endif
+
 /*
  * Disable MSIx interrupt if enabled, call generic MSIx code
  * to cleanup, and clear pending MSIx interrupts.
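qib_update_rhdrq_dca() and qib_update_sdma_dca() above follow the same shadow-register discipline: keep a cached copy of a write-mostly register, clear the target field with a precomputed inverted mask, OR in the new DCA tag at the field's LSB, and write the whole word back. Reduced to a sketch (my_* names illustrative):

#include <linux/types.h>

/* clear_mask is the precomputed ~MASK(field), lsb its bit offset */
static u64 my_field_update(u64 shadow, u64 clear_mask, int lsb, u64 val)
{
	shadow &= clear_mask;	/* zero the field, keep everything else */
	shadow |= val << lsb;	/* place the new value in the field */
	return shadow;		/* caller writes this whole word to hardware */
}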
@@ -2575,6 +2837,9 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
 
 	dd->cspec->num_msix_entries = 0;
 	for (i = 0; i < n; i++) {
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+		reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
+#endif
 		irq_set_affinity_hint(
 			dd->cspec->msix_entries[i].msix.vector, NULL);
 		free_cpumask_var(dd->cspec->msix_entries[i].mask);
@@ -2602,6 +2867,15 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
 {
 	int i;
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	if (dd->flags & QIB_DCA_ENABLED) {
+		dca_remove_requester(&dd->pcidev->dev);
+		dd->flags &= ~QIB_DCA_ENABLED;
+		dd->cspec->dca_ctrl = 0;
+		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
+	}
+#endif
+
 	qib_7322_free_irq(dd);
 	kfree(dd->cspec->cntrs);
 	kfree(dd->cspec->sendchkenable);
@@ -3068,6 +3342,53 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+
+static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
+{
+	if (!m->dca)
+		return;
+	qib_devinfo(dd->pcidev,
+		"Disabling notifier on HCA %d irq %d\n",
+		dd->unit,
+		m->msix.vector);
+	irq_set_affinity_notifier(
+		m->msix.vector,
+		NULL);
+	m->notifier = NULL;
+}
+
+static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
+{
+	struct qib_irq_notify *n;
+
+	if (!m->dca)
+		return;
+	n = kzalloc(sizeof(*n), GFP_KERNEL);
+	if (n) {
+		int ret;
+
+		m->notifier = n;
+		n->notify.irq = m->msix.vector;
+		n->notify.notify = qib_irq_notifier_notify;
+		n->notify.release = qib_irq_notifier_release;
+		n->arg = m->arg;
+		n->rcv = m->rcv;
+		qib_devinfo(dd->pcidev,
+			"set notifier irq %d rcv %d notify %p\n",
+			n->notify.irq, n->rcv, &n->notify);
+		ret = irq_set_affinity_notifier(
+				n->notify.irq,
+				&n->notify);
+		if (ret) {
+			m->notifier = NULL;
+			kfree(n);
+		}
+	}
+}
+
+#endif
+
 /*
  * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
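setup_dca_notifier() above uses the generic IRQ affinity-notification hook. A minimal registration sketch with the same kref-based release discipline (my_* names illustrative; irq_set_affinity_notifier() and struct irq_affinity_notify are the real kernel API):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_notify {
	void *arg;
	struct irq_affinity_notify notify;
};

static void my_affinity_changed(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct my_notify *n = container_of(notify, struct my_notify, notify);

	/* retarget per-CPU resources toward cpumask_first(mask) here */
	(void)n;
}

static void my_affinity_release(struct kref *ref)
{
	struct my_notify *n =
		container_of(ref, struct my_notify, notify.kref);

	kfree(n);	/* core drops the last reference after unregister */
}

static int my_watch_irq(unsigned int irq, void *arg)
{
	struct my_notify *n = kzalloc(sizeof(*n), GFP_KERNEL);
	int ret;

	if (!n)
		return -ENOMEM;
	n->arg = arg;
	n->notify.irq = irq;
	n->notify.notify = my_affinity_changed;
	n->notify.release = my_affinity_release;
	ret = irq_set_affinity_notifier(irq, &n->notify);
	if (ret)
		kfree(n);	/* never registered, release won't fire */
	return ret;
}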
@@ -3149,6 +3470,9 @@ try_intx:
 		void *arg;
 		u64 val;
 		int lsb, reg, sh;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+		int dca = 0;
+#endif
 
 		dd->cspec->msix_entries[msixnum].
 			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
@@ -3161,6 +3485,9 @@ try_intx:
 				arg = dd->pport + irq_table[i].port - 1;
 			} else
 				arg = dd;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+			dca = irq_table[i].dca;
+#endif
 			lsb = irq_table[i].lsb;
 			handler = irq_table[i].handler;
 			snprintf(dd->cspec->msix_entries[msixnum].name,
@@ -3178,6 +3505,9 @@ try_intx:
 				continue;
 			if (qib_krcvq01_no_msi && ctxt < 2)
 				continue;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+			dca = 1;
+#endif
 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
 			handler = qib_7322pintr;
 			snprintf(dd->cspec->msix_entries[msixnum].name,
@@ -3203,6 +3533,11 @@ try_intx:
 			goto try_intx;
 		}
 		dd->cspec->msix_entries[msixnum].arg = arg;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+		dd->cspec->msix_entries[msixnum].dca = dca;
+		dd->cspec->msix_entries[msixnum].rcv =
+				handler == qib_7322pintr;
+#endif
 		if (lsb >= 0) {
 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
@@ -6452,6 +6787,86 @@ static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
 }
 
+/*
+ * sdma_lock should be acquired before calling this routine
+ */
+static void dump_sdma_7322_state(struct qib_pportdata *ppd)
+{
+	u64 reg, reg1, reg2;
+
+	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmastatus: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_sendctrl);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA sendctrl: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmabase);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmabase: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
+	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
+	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
+		reg, reg1, reg2);
+
+	/* get bufuse bits, clear them, and print them again if non-zero */
+	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
+	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
+	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
+	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
+	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
+	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
+	/* 0 and 1 should always be zero, so print as short form */
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
+		reg, reg1, reg2);
+	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
+	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
+	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
+	/* 0 and 1 should always be zero, so print as short form */
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
+		reg, reg1, reg2);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmatail);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmatail: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmahead);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmahead: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmalengen: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmadesccnt: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
+
+	dump_sdma_state(ppd);
+}
+
 static struct sdma_set_state_action sdma_7322_action_table[] = {
 	[qib_sdma_state_s00_hw_down] = {
 		.go_s99_running_tofalse = 1,
@@ -6885,6 +7300,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 	dd->f_sdma_init_early = qib_7322_sdma_init_early;
 	dd->f_writescratch = writescratch;
 	dd->f_tempsense_rd = qib_7322_tempsense_rd;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	dd->f_notify_dca = qib_7322_notify_dca;
+#endif
 	/*
 	 * Do remaining PCIe setup and save PCIe values in dd.
 	 * Any error printing is already done by the init code.
@@ -6921,7 +7339,7 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 		actual_cnt -= dd->num_pports;
 
 	tabsize = actual_cnt;
-	dd->cspec->msix_entries = kmalloc(tabsize *
+	dd->cspec->msix_entries = kzalloc(tabsize *
 			sizeof(struct qib_msix_entry), GFP_KERNEL);
 	if (!dd->cspec->msix_entries) {
 		qib_dev_err(dd, "No memory for MSIx table\n");
@@ -6941,7 +7359,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 
 	/* clear diagctrl register, in case diags were running and crashed */
 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
 
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	if (!dca_add_requester(&pdev->dev)) {
+		qib_devinfo(dd->pcidev, "DCA enabled\n");
+		dd->flags |= QIB_DCA_ENABLED;
+		qib_setup_dca(dd);
+	}
+#endif
 	goto bail;
 
 bail_cleanup:
@@ -7156,15 +7580,20 @@ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
+	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  7 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  6 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  8 },	/* QME7342 backplane settings 1.1 */
 };
 
 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
@@ -7173,15 +7602,20 @@ static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
-	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
+	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  7 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  6 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  8 },	/* QME7342 backplane settings 1.1 */
 };
 
 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
@@ -7190,15 +7624,20 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
 	{  0, 1, 0,  5 },	/* QMH7342 backplane settings */
 	{  0, 1, 0,  6 },	/* QMH7342 backplane settings */
 	{  0, 1, 0,  8 },	/* QMH7342 backplane settings */
-	{  0, 1, 12, 10 },	/* QME7342 backplane setting */
-	{  0, 1, 12, 11 },	/* QME7342 backplane setting */
-	{  0, 1, 12, 12 },	/* QME7342 backplane setting */
-	{  0, 1, 12, 14 },	/* QME7342 backplane setting */
-	{  0, 1, 12,  6 },	/* QME7342 backplane setting */
-	{  0, 1, 12,  7 },	/* QME7342 backplane setting */
-	{  0, 1, 12,  8 },	/* QME7342 backplane setting */
 	{  0, 1, 0, 10 },	/* QMH7342 backplane settings */
 	{  0, 1, 0, 12 },	/* QMH7342 backplane settings */
+	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
+	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  7 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  6 },	/* QME7342 backplane settings 1.1 */
+	{  0, 1, 0,  8 },	/* QME7342 backplane settings 1.1 */
 };
 
 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
@@ -39,10 +39,17 @@
|
||||
#include <linux/idr.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/printk.h>
|
||||
#ifdef CONFIG_INFINIBAND_QIB_DCA
|
||||
#include <linux/dca.h>
|
||||
#endif
|
||||
|
||||
#include "qib.h"
|
||||
#include "qib_common.h"
|
||||
#include "qib_mad.h"
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
#include "qib_debugfs.h"
|
||||
#include "qib_verbs.h"
|
||||
#endif
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
|
||||
@@ -64,6 +71,11 @@ ushort qib_cfgctxts;
|
||||
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
|
||||
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
|
||||
|
||||
unsigned qib_numa_aware;
|
||||
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(numa_aware,
|
||||
"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");
|
||||
|
||||
/*
|
||||
* If set, do not write to any regs if avoidable, hack to allow
|
||||
* check for deranged default register values.
|
||||
@@ -89,8 +101,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
|
||||
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
|
||||
|
||||
struct workqueue_struct *qib_cq_wq;
|
||||
|
||||
static void verify_interrupt(unsigned long);
|
||||
|
||||
static struct idr qib_unit_table;
|
||||
@@ -121,6 +131,11 @@ int qib_create_ctxts(struct qib_devdata *dd)
|
||||
{
|
||||
unsigned i;
|
||||
int ret;
|
||||
int local_node_id = pcibus_to_node(dd->pcidev->bus);
|
||||
|
||||
if (local_node_id < 0)
|
||||
local_node_id = numa_node_id();
|
||||
dd->assigned_node_id = local_node_id;
|
||||
|
||||
/*
|
||||
* Allocate full ctxtcnt array, rather than just cfgctxts, because
|
||||
@@ -143,7 +158,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
|
||||
continue;
|
||||
|
||||
ppd = dd->pport + (i % dd->num_pports);
|
||||
rcd = qib_create_ctxtdata(ppd, i);
|
||||
|
||||
rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
|
||||
if (!rcd) {
|
||||
qib_dev_err(dd,
|
||||
"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
|
||||
@@ -161,20 +177,33 @@ done:
|
||||
/*
|
||||
* Common code for user and kernel context setup.
|
||||
*/
|
||||
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
|
||||
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
|
||||
int node_id)
|
||||
{
|
||||
struct qib_devdata *dd = ppd->dd;
|
||||
struct qib_ctxtdata *rcd;
|
||||
|
||||
rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
|
||||
rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
|
||||
if (rcd) {
|
||||
INIT_LIST_HEAD(&rcd->qp_wait_list);
|
||||
rcd->node_id = node_id;
|
||||
rcd->ppd = ppd;
|
||||
rcd->dd = dd;
|
||||
rcd->cnt = 1;
|
||||
rcd->ctxt = ctxt;
|
||||
dd->rcd[ctxt] = rcd;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
|
||||
rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
|
||||
GFP_KERNEL, node_id);
|
||||
if (!rcd->opstats) {
|
||||
kfree(rcd);
|
||||
qib_dev_err(dd,
|
||||
"Unable to allocate per ctxt stats buffer\n");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
#endif
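The context allocation above shows the NUMA pattern this series applies throughout: resolve the HCA's home node once from its PCI bus, fall back to the current node when the bus reports none, then feed that node to the *_node allocator variants. A condensed sketch of the idiom (helper names are illustrative, not qib symbols):

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* pcibus_to_node() returns -1 when firmware assigned no node */
static int hca_home_node(struct pci_dev *pdev)
{
	int node = pcibus_to_node(pdev->bus);

	return node < 0 ? numa_node_id() : node;
}

static void *alloc_ctxt_local(struct pci_dev *pdev, size_t size)
{
	/* kzalloc_node() places the zeroed buffer on the requested node */
	return kzalloc_node(size, GFP_KERNEL, hca_home_node(pdev));
}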
		dd->f_init_ctxt(rcd);

		/*

@@ -429,6 +458,7 @@ static int loadtime_init(struct qib_devdata *dd)
	dd->intrchk_timer.function = verify_interrupt;
	dd->intrchk_timer.data = (unsigned long) dd;

	ret = qib_cq_init(dd);
done:
	return ret;
}

@@ -944,6 +974,10 @@ void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}

@@ -1033,7 +1067,6 @@ done:
	dd->f_set_armlaunch(dd, 1);
}


void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

@@ -1043,6 +1076,9 @@ void qib_free_devdata(struct qib_devdata *dd)
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	ib_dealloc_device(&dd->verbs_dev.ibdev);
}

@@ -1066,6 +1102,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
		goto bail;
	}

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
#endif

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&qib_devs_lock, flags);

@@ -1081,6 +1121,9 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
	if (ret < 0) {
		qib_early_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
#ifdef CONFIG_DEBUG_FS
		qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
		ib_dealloc_device(&dd->verbs_dev.ibdev);
		dd = ERR_PTR(ret);
		goto bail;

@@ -1158,6 +1201,35 @@ struct pci_driver qib_driver = {
	.err_handler = &qib_pci_err_handler,
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call = qib_notify_dca,
	.next = NULL,
	.priority = 0
};

static int qib_notify_dca_device(struct device *device, void *data)
{
	struct qib_devdata *dd = dev_get_drvdata(device);
	unsigned long event = *(unsigned long *)data;

	return dd->f_notify_dca(dd, event);
}

static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int rval;

	rval = driver_for_each_device(&qib_driver.driver, NULL,
				      &event, qib_notify_dca_device);
	return rval ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.

@@ -1170,22 +1242,22 @@ static int __init qlogic_ib_init(void)
	if (ret)
		goto bail;

	qib_cq_wq = create_singlethread_workqueue("qib_cq");
	if (!qib_cq_wq) {
		ret = -ENOMEM;
		goto bail_dev;
	}

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_register_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_init();
#endif
	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_unit;
		goto bail_dev;
	}

	/* not fatal if it doesn't work */

@@ -1193,10 +1265,14 @@ static int __init qlogic_ib_init(void)
		pr_err("Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_unit:
	idr_destroy(&qib_unit_table);
	destroy_workqueue(qib_cq_wq);
bail_dev:
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif
	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
bail:
	return ret;

@@ -1217,9 +1293,13 @@ static void __exit qlogic_ib_cleanup(void)
			"Unable to cleanup counter filesystem: error %d\n",
			-ret);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&qib_driver);

	destroy_workqueue(qib_cq_wq);
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

@@ -1270,7 +1350,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i, cnt = 0;
		int i;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;

@@ -1283,13 +1363,13 @@ static void cleanup_device_data(struct qib_devdata *dd)
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}

		tmpp = dd->pageshadow;
		dd->pageshadow = NULL;
		vfree(tmpp);
		dd->physshadow = NULL;
		vfree(tmpd);
	}

	/*

@@ -1311,6 +1391,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
	}
	kfree(tmp);
	kfree(dd->boardname);
	qib_cq_exit(dd);
}

/*

@@ -1483,6 +1564,7 @@ static void qib_remove_one(struct pci_dev *pdev)
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;

@@ -1492,9 +1574,13 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
			sizeof(u32), PAGE_SIZE);
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd,

@@ -1510,9 +1596,11 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
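dma_alloc_coherent() has no node parameter; it allocates on whatever NUMA node the struct device reports. The set_dev_node() bracketing above temporarily points the PCI device at the receive context's node so the ring lands there, then restores the original value. The same idiom in isolation (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *dma_alloc_on_node(struct device *dev, size_t size,
			       dma_addr_t *handle, int node)
{
	int orig_node = dev_to_node(dev);
	void *buf;

	/* retarget the device only for the duration of the allocation */
	set_dev_node(dev, node);
	buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
	set_dev_node(dev, orig_node);
	return buf;
}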

@@ -1556,6 +1644,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be

@@ -1574,25 +1663,29 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL);
			kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
				GFP_KERNEL);
			kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
				GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *

@@ -35,6 +35,9 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

@@ -222,8 +225,8 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	spin_lock_irqsave(&dev->qpt_lock, flags);
	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);

@@ -235,7 +238,6 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();
}

/*

@@ -247,36 +249,39 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		atomic_dec(&qp->refcount);
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		atomic_dec(&qp->refcount);
		rcu_assign_pointer(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				atomic_dec(&qp->refcount);
				rcu_assign_pointer(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**

@@ -334,26 +339,25 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		rcu_read_lock();
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		rcu_read_lock();
		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	if (qp)
		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
			qp = NULL;

	rcu_read_unlock();
	return qp;
}
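The reworked lookup is the standard RCU idiom: walk the hash chain under rcu_read_lock() and hand out only objects whose reference count can still be raised, since atomic_inc_not_zero() fails once a concurrent remove has dropped the last reference. A stripped-down sketch with a generic object type (not the qib structures):

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct obj {
	u32 key;
	atomic_t refcount;
	struct obj __rcu *next;
};

static struct obj *lookup_and_get(struct obj __rcu **head, u32 key)
{
	struct obj *o;

	rcu_read_lock();
	for (o = rcu_dereference(*head); o; o = rcu_dereference(o->next))
		if (o->key == key)
			break;
	/* refuse objects already on their way out */
	if (o && unlikely(!atomic_inc_not_zero(&o->refcount)))
		o = NULL;
	rcu_read_unlock();
	return o;
}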

@@ -1286,3 +1290,94 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth)
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	rcu_read_lock();
	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			if (iter->qp)
				atomic_dec(&iter->qp->refcount);
			atomic_inc(&qp->refcount);
			rcu_read_unlock();
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	rcu_read_unlock();
	if (iter->qp)
		atomic_dec(&iter->qp->refcount);
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif
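These three entry points are shaped for a debugfs seq_file consumer: init allocates and positions a cursor, next advances it (nonzero means exhausted), and print emits one line per QP. A plausible wiring is sketched below; the qp_seq_* names and the use of s->private to carry the struct qib_ibdev pointer are assumptions for illustration, not the actual qib_debugfs.c code, and early-release cleanup is elided:

#include <linux/seq_file.h>
#include <linux/slab.h>

/* illustrative glue; assumes s->private points at the struct qib_ibdev */
static void *qp_seq_start(struct seq_file *s, loff_t *pos)
{
	struct qib_qp_iter *iter = qib_qp_iter_init(s->private);
	loff_t n = *pos;

	if (!iter)
		return NULL;
	while (n--) {
		if (qib_qp_iter_next(iter)) {
			kfree(iter);
			return NULL;
		}
	}
	return iter;
}

static void *qp_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct qib_qp_iter *iter = v;

	(*pos)++;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}
	return iter;
}

static void qp_seq_stop(struct seq_file *s, void *v)
{
	/* in this sketch the iterator frees itself on exhaustion */
}

static int qp_seq_show(struct seq_file *s, void *v)
{
	qib_qp_iter_print(s, v);
	return 0;
}

static const struct seq_operations qp_seq_ops = {
	.start = qp_seq_start,
	.next  = qp_seq_next,
	.stop  = qp_seq_stop,
	.show  = qp_seq_show,
};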

@@ -708,6 +708,62 @@ unlock:
	return ret;
}

/*
 * sdma_lock should be acquired before calling this routine
 */
void dump_sdma_state(struct qib_pportdata *ppd)
{
	struct qib_sdma_desc *descq;
	struct qib_sdma_txreq *txp, *txpnext;
	__le64 *descqp;
	u64 desc[2];
	dma_addr_t addr;
	u16 gen, dwlen, dwoffset;
	u16 head, tail, cnt;

	head = ppd->sdma_descq_head;
	tail = ppd->sdma_descq_tail;
	cnt = qib_sdma_descq_freecnt(ppd);
	descq = ppd->sdma_descq;

	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_head: %u\n", head);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_tail: %u\n", tail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sdma_descq_freecnt: %u\n", cnt);

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };

		descqp = &descq[head].qw[0];
		desc[0] = le64_to_cpu(descqp[0]);
		desc[1] = le64_to_cpu(descqp[1]);
		flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
		flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
		flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
		flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
		flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
		gen = (desc[0] >> 30) & 3ULL;
		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
		dwoffset = (desc[0] & 0x7ffULL) << 2;
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
			head, flags, addr, gen, dwlen, dwoffset);
		if (++head == ppd->sdma_descq_cnt)
			head = 0;
	}

	/* print dma descriptor indices from the TX requests */
	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
				 list)
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
			txp->start_idx, txp->next_descq_idx);
}
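The decode in the loop implies a compact layout for qword 0 of an SDMA descriptor. A stand-alone decoder reconstructed from those shifts and masks (field names are informal, derived from the code above rather than from a hardware spec):

#include <linux/types.h>

struct sdma_desc_fields {
	u64 addr;	/* word-aligned DMA address */
	u16 gen;	/* generation, bits 31:30 */
	u16 len_bytes;	/* 11-bit dword count, scaled to bytes */
	u16 off_bytes;	/* 11-bit dword offset, scaled to bytes */
	bool intr;	/* the 'I' flag above, bit 15 */
	bool head;	/* the 'H' flag above, bit 13 */
	bool first;	/* the 'F' flag above, bit 12 */
};

static void sdma_desc_decode(const u64 desc[2], struct sdma_desc_fields *f)
{
	f->intr      = desc[0] & (1ULL << 15);
	f->head      = desc[0] & (1ULL << 13);
	f->first     = desc[0] & (1ULL << 12);
	f->addr      = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
	f->gen       = (desc[0] >> 30) & 3;
	f->len_bytes = (desc[0] >> 14) & (0x7ffULL << 2);
	f->off_bytes = (desc[0] & 0x7ffULL) << 2;
}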

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{

@@ -645,9 +645,11 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
	} else
		goto drop;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
	ibp->opstats[opcode & 0x7f].n_packets++;
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
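The rewritten statistics path masks the opcode down to seven bits before indexing, which is what keeps rcd->opstats->stats[] safe at 128 entries: the opcode sits in the top byte of BTH dword 0, and only its low seven bits are used as an index here. As a one-line helper capturing that assumption:

#include <linux/types.h>
#include <asm/byteorder.h>

/* stats index from BTH dword 0: top byte, masked to 7 bits (0..127) */
static inline u32 bth_stats_index(__be32 bth0)
{
	return (be32_to_cpu(bth0) >> 24) & 0x7f;
}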

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *

@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

@@ -267,7 +268,8 @@ struct qib_cq_wc {
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;
	struct kthread_work comptask;
	struct qib_devdata *dd;
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;
	u8 triggered;
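Replacing the work_struct comptask with a kthread_work moves completion callbacks off the shared workqueue and onto a worker thread the driver owns; the new dd back-pointer suggests the worker lives in per-device state. A minimal sketch of the kthread_work API of this kernel generation (pre-4.9 names; the cq_* identifiers are hypothetical and teardown via kthread_stop() is elided):

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker cq_worker;
static struct kthread_work cq_work;
static struct task_struct *cq_task;

static void cq_comp_handler(struct kthread_work *work)
{
	/* completion callbacks would run here, in thread context */
}

static int start_cq_worker(void)
{
	init_kthread_worker(&cq_worker);
	cq_task = kthread_run(kthread_worker_fn, &cq_worker, "cq_sketch");
	if (IS_ERR(cq_task))
		return PTR_ERR(cq_task);

	init_kthread_work(&cq_work, cq_comp_handler);
	queue_kthread_work(&cq_worker, &cq_work);
	return 0;
}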

@@ -658,6 +660,10 @@ struct qib_opcode_stats {
	u64 n_bytes;	/* total number of bytes */
};

struct qib_opcode_stats_perctx {
	struct qib_opcode_stats stats[128];
};

struct qib_ibport {
	struct qib_qp __rcu *qp0;
	struct qib_qp __rcu *qp1;

@@ -724,7 +730,6 @@ struct qib_ibport {
	u8 vl_high_limit;
	u8 sl_to_vl[16];

	struct qib_opcode_stats opstats[128];
};

@@ -768,6 +773,10 @@ struct qib_ibdev {
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
	/* per HCA debugfs */
	struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {

@@ -832,8 +841,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */

@@ -910,6 +917,18 @@ void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

@@ -972,6 +991,10 @@ int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);