Merge branch 'linux-2.6'
@@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

/* CM */
extern int c2_llp_connect(struct iw_cm_id *cm_id,
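Note: this rendering drops the usual "-"/"+" diff markers, so paired lines like the two c2_arm_cq() prototypes above are the before/after of a single declaration: the CQ re-arm entry points move from enum ib_cq_notify to the bitmask type enum ib_cq_notify_flags. For reference, the new flags in include/rdma/ib_verbs.h look like the sketch below (reproduced from memory of that header rather than from this diff, so verify against the tree):

	enum ib_cq_notify_flags {
		IB_CQ_SOLICITED			= 1 << 0,	/* arm for solicited completions only */
		IB_CQ_NEXT_COMP			= 1 << 1,	/* arm for the next completion */
		IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
		IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,	/* caller wants the "maybe missed" hint */
	};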

@@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
	return npolled;
}

int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if (notify == IB_CQ_NEXT_COMP)
	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if (notify == IB_CQ_SOLICITED)
	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;
@@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
	 */
	readb(&shared->armed);

	return 0;
	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}

static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
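The c2_arm_cq() rework above is the provider half of the new contract: if the consumer passed IB_CQ_REPORT_MISSED_EVENTS, the driver may return a positive value when completions could already be queued, so the caller re-polls instead of sleeping until the next interrupt. A minimal consumer-side sketch of that loop (my_handle_wc() is a hypothetical completion handler; the verbs calls are the standard ones):

	static void my_drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc;

		do {
			/* Empty the CQ first ... */
			while (ib_poll_cq(cq, 1, &wc) > 0)
				my_handle_wc(&wc);
			/*
			 * ... then re-arm and ask for the hint: a return value > 0
			 * means a completion may have slipped in after the last
			 * poll, so go around again rather than wait for an event.
			 */
		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					  IB_CQ_REPORT_MISSED_EVENTS) > 0);
	}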

@@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
	return 0;
}

static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
@@ -795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev)
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &dev->pcidev->dev;
	dev->ibdev.query_device = c2_query_device;
	dev->ibdev.query_port = c2_query_port;

@@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
				return -EIO;
			}
		}

		return 1;
	}

	return 0;
}


@@ -38,6 +38,7 @@
#include "firmware_exports.h"

#define T3_MAX_SGE 4
#define T3_MAX_INLINE 64

#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \

@@ -1109,6 +1109,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * We get 2 abort replies from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
		ep->flags |= ABORT_REQ_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
@@ -1189,6 +1198,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
@@ -1475,6 +1485,15 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	int ret;
	int state;

	/*
	 * We get 2 peer aborts from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
		ep->flags |= PEER_ABORT_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
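Both abort paths above rely on the same dedup idiom spelled out in the new comments: the hardware delivers the abort notification twice, and tearing the endpoint down on the first one would leave the second reply pointing at freed state. So the first occurrence only records a flag, and the real cleanup runs when its twin arrives. Distilled (the flag bits are the ones added in this merge; the cleanup call is a placeholder, not driver code):

	if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
		ep->flags |= ABORT_REQ_IN_PROGRESS;	/* remember the first reply */
		return CPL_RET_BUF_DONE;		/* ... and wait for its twin */
	}
	/* second reply: the endpoint can be torn down safely now */
	do_teardown(ep);				/* placeholder for the real cleanup */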

@@ -143,6 +143,11 @@ enum iwch_ep_state {
	DEAD,
};

enum iwch_ep_flags {
	PEER_ABORT_IN_PROGRESS	= (1 << 0),
	ABORT_REQ_IN_PROGRESS	= (1 << 1),
};

struct iwch_ep_common {
	struct iw_cm_id *cm_id;
	struct iwch_qp *qp;
@@ -181,6 +186,7 @@ struct iwch_ep {
	u16 plen;
	u32 ird;
	u32 ord;
	u32 flags;
};

static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)

@@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
	return 0;
}

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
@@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
@@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if (notify == IB_CQ_SOLICITED)
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
@@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err)
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

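iwch_arm_cq() above folds two things into one return value from cxio_hal_cq_op(): a negative value is a real failure, while a positive one only means "CQEs were already pending when the CQ was re-armed" and is surfaced only when IB_CQ_REPORT_MISSED_EVENTS was requested. A hypothetical helper, not driver code, just to spell the mapping out:

	/* map_arm_result() is a made-up name; err is the cxio_hal_cq_op() result. */
	static int map_arm_result(int err, enum ib_cq_notify_flags flags)
	{
		if (err < 0)
			return err;		/* real failure, propagate */
		if (err > 0 && (flags & IB_CQ_REPORT_MISSED_EVENTS))
			return err;		/* caller asked for the "maybe missed" hint */
		return 0;			/* hint not requested: hide it */
	}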
@@ -780,6 +782,9 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two. However, all the code assumes
@@ -1107,6 +1112,7 @@ int iwch_register_device(struct iwch_dev *dev)
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;

@@ -471,43 +471,62 @@ int iwch_bind_mw(struct ib_qp *qp,
	return err;
}

static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	switch (t3err) {
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (tagged == 1) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_STAG;
		} else if (tagged == 2) {
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
	case TPT_ERR_QPID:
	case TPT_ERR_ACCESS:
		if (tagged == 1) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_STAG_NOT_ASSOC;
		} else if (tagged == 2) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		}
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged == 1) {
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else if (tagged == 2) {
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_MSG_TOOBIG;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
@@ -591,8 +610,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	int status;
	int tagged = 0;
	struct sk_buff *skb;

	PDBG("%s %d\n", __FUNCTION__, __LINE__);
@@ -610,17 +627,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)
			tagged = 1;
		if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) ||
		    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP))
			tagged = 2;
	} else {
		status = TPT_ERR_INTERNAL_ERR;
	}
	build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	build_fw_riwrh((void *)wqe, T3_WR_SEND,
		       T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
		       qhp->ep->hwtid, 5);

@@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
	return ret;
}

struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{

@@ -123,7 +123,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);


struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
			     struct ib_ucontext *context,
			     struct ib_udata *udata);

@@ -135,7 +135,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);

int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);

int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);

struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *init_attr,

@@ -313,6 +313,7 @@ int ehca_init_device(struct ehca_shca *shca)

	shca->ib_device.node_type = RDMA_NODE_IB_CA;
	shca->ib_device.phys_port_cnt = shca->num_ports;
	shca->ib_device.num_comp_vectors = 1;
	shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
	shca->ib_device.query_device = ehca_query_device;
	shca->ib_device.query_port = ehca_query_port;
@@ -375,7 +376,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
		return -EPERM;
	}

	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
	if (IS_ERR(ibcq)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
		return PTR_ERR(ibcq);

@@ -634,11 +634,13 @@ poll_cq_exit0:
	return ret;
}

int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	unsigned long spl_flags;
	int ret = 0;

	switch (cq_notify) {
	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
@@ -649,5 +651,11 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
		return -EINVAL;
	}

	return 0;
	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}

@@ -140,6 +140,14 @@ static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
	return cqe;
}

static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
{
	struct ehca_cqe *cqe = ipz_qeit_get(queue);
	u32 cqe_flags = cqe->cqe_flags;

	return cqe_flags >> 7 == (queue->toggle_state & 1);
}

/*
 * returns and resets Queue Entry iterator
 * returns address (kv) of first Queue Entry
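ehca's new IB_CQ_REPORT_MISSED_EVENTS path relies on ipz_qeit_is_valid() above: a CQE slot counts as valid when its flag bit matches the queue's current toggle state, which flips every time the ring wraps, so stale entries from the previous lap test false. A self-contained sketch of that convention with made-up types (only the bit test mirrors the code above):

	/* Hypothetical ring with a per-wrap toggle bit, to illustrate the test. */
	struct toggle_ring {
		unsigned char *slots;	/* bit 7 of each slot is the "valid" flag */
		unsigned int head;
		unsigned int toggle;	/* flips 0 <-> 1 on every wrap */
	};

	static int slot_is_valid(const struct toggle_ring *r)
	{
		/* A slot written in the current lap carries the current toggle value;
		 * slots left over from the previous lap still carry the old value. */
		return (r->slots[r->head] >> 7) == (r->toggle & 1);
	}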

@@ -204,7 +204,7 @@ static void send_complete(unsigned long data)
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
@@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) wc;
		int err;
		u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_wc;
		}

		/* Allocate info for ipath_mmap(). */
		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
		if (!ip) {
		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}
		cq->ip = ip;
		ip->context = context;
		ip->obj = wc;
		kref_init(&ip->ref);
		ip->mmap_cnt = 0;
		ip->size = PAGE_ALIGN(sizeof(*wc) +
				      sizeof(struct ib_wc) * entries);
		spin_lock_irq(&dev->pending_lock);
		ip->next = dev->pending_mmaps;
		dev->pending_mmaps = ip;
		spin_unlock_irq(&dev->pending_lock);

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

@@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_wc;
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
@@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);

bail_cq:
	kfree(cq);

done:
	return ret;
}
@@ -340,17 +334,18 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
@@ -358,9 +353,15 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify;
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);
	return 0;

	return ret;
}

/**
@@ -443,13 +444,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;
		u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;

		ip->obj = wc;
		ip->size = PAGE_ALIGN(sizeof(*wc) +
				      sizeof(struct ib_wc) * cqe);
		ipath_update_mmap_info(dev, ip, s, wc);
		spin_lock_irq(&dev->pending_lock);
		ip->next = dev->pending_mmaps;
		dev->pending_mmaps = ip;
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}


@@ -38,7 +38,6 @@
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pci.h>

#include "ipath_kernel.h"


@@ -37,7 +37,6 @@
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"

@@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref)
{
	struct ipath_mmap_info *ip =
		container_of(ref, struct ipath_mmap_info, ref);
	struct ipath_ibdev *dev = to_idev(ip->context->device);

	spin_lock_irq(&dev->pending_lock);
	list_del(&ip->pending_mmaps);
	spin_unlock_irq(&dev->pending_lock);

	vfree(ip->obj);
	kfree(ip);
@@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma)
	struct ipath_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
	ip->mmap_cnt++;
}

static void ipath_vma_close(struct vm_area_struct *vma)
{
	struct ipath_mmap_info *ip = vma->vm_private_data;

	ip->mmap_cnt--;
	kref_put(&ip->ref, ipath_release_mmap_info);
}

@@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
	struct ipath_ibdev *dev = to_idev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ipath_mmap_info *ip, **pp;
	struct ipath_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
@@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&dev->pending_lock);
	for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
	list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (void *) offset != ip->obj)
		if (context != ip->context || (__u64) offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		*pp = ip->next;
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
@@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
done:
	return ret;
}

/*
 * Allocate information for ipath_mmap
 */
struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj) {
	struct ipath_mmap_info *ip;

	ip = kmalloc(sizeof *ip, GFP_KERNEL);
	if (!ip)
		goto bail;

	size = PAGE_ALIGN(size);

	spin_lock_irq(&dev->mmap_offset_lock);
	if (dev->mmap_offset == 0)
		dev->mmap_offset = PAGE_SIZE;
	ip->offset = dev->mmap_offset;
	dev->mmap_offset += size;
	spin_unlock_irq(&dev->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = size;
	ip->context = context;
	ip->obj = obj;
	kref_init(&ip->ref);

bail:
	return ip;
}

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj) {
	size = PAGE_ALIGN(size);

	spin_lock_irq(&dev->mmap_offset_lock);
	if (dev->mmap_offset == 0)
		dev->mmap_offset = PAGE_SIZE;
	ip->offset = dev->mmap_offset;
	dev->mmap_offset += size;
	spin_unlock_irq(&dev->mmap_offset_lock);

	ip->size = size;
	ip->obj = obj;
}
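The new ipath_create_mmap_info()/ipath_update_mmap_info() helpers above replace the old pointer-as-offset scheme: each mmapable object now gets a unique per-device offset (starting at PAGE_SIZE and bumped by the page-aligned object size), that offset is copied back to userspace through udata, and ipath_mmap() looks the object up by (context, offset) instead of by raw kernel pointer. Roughly, from the userspace side (a hypothetical helper under those assumptions, not the libipathverbs API):

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>

	/* The kernel wrote the object's mmap offset into the create-CQ/QP/SRQ
	 * response; the process hands that offset back as the mmap cookie. */
	static void *map_ipath_queue(int uverbs_fd, uint64_t offset, size_t size)
	{
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			       uverbs_fd, (off_t) offset);
		return p == MAP_FAILED ? NULL : p;
	}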

@@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) qp->r_rq.wq;
		int err;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

		if (qp->r_rq.wq) {
			/* Allocate info for ipath_mmap(). */
			ip = kmalloc(sizeof(*ip), GFP_KERNEL);
			if (!ip) {
			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_rwq;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_rwq;
			}
			qp->ip = ip;
			ip->context = ibpd->uobject->context;
			ip->obj = qp->r_rq.wq;
			kref_init(&ip->ref);
			ip->mmap_cnt = 0;
			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			spin_lock_irq(&dev->pending_lock);
			ip->next = dev->pending_mmaps;
			dev->pending_mmaps = ip;
			spin_unlock_irq(&dev->pending_lock);

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

@@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;


@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
	case OP(ATOMIC_ACKNOWLEDGE):
		qp->s_ack_state = OP(ACKNOWLEDGE);
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & IPATH_S_ACK_PENDING)
				goto normal;
			qp->s_ack_state = OP(ACKNOWLEDGE);
			goto bail;
		}

@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
			} else
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				if (++qp->s_tail_ack_queue >
				    IPATH_MAX_RDMA_ATOMIC)
					qp->s_tail_ack_queue = 0;
			}
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn;
			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
				qp->s_tail_ack_queue = 0;
		}
		bth0 = qp->s_ack_state << 24;
		break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
				qp->s_tail_ack_queue = 0;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~IPATH_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
@@ -223,23 +223,18 @@ int ipath_make_rc_req(struct ipath_qp *qp,
	/* Sending responses has higher priority over sending requests. */
	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
	     qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
		goto done;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout)
	    qp->s_rnr_timeout || qp->s_wait_credit)
		goto bail;

	/* Limit the number of packets sent without an ACK. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		goto bail;
	}

@@ -587,9 +582,12 @@ static void send_rc_ack(struct ipath_qp *qp)
	u32 hwords;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
	    qp->s_ack_state != OP(ACKNOWLEDGE))
		goto queue_ack;

	/* Construct the header. */
@@ -640,11 +638,11 @@ static void send_rc_ack(struct ipath_qp *qp)
		dev->n_rc_qacks++;

queue_ack:
	spin_lock_irq(&qp->s_lock);
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= IPATH_S_ACK_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	spin_unlock_irq(&qp->s_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);
@@ -1261,6 +1259,7 @@ ack_err:
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	ipath_sqerror_qp(qp, &wc);
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
@@ -1294,6 +1293,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
	struct ipath_ack_entry *e;
	u8 i, prev;
	int old_req;
	unsigned long flags;

	if (diff > 0) {
		/*
@@ -1327,7 +1327,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
	psn &= IPATH_PSN_MASK;
	e = NULL;
	old_req = 1;
	spin_lock_irq(&qp->s_lock);
	spin_lock_irqsave(&qp->s_lock, flags);
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
@@ -1425,7 +1425,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irq(&qp->s_lock);
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
@@ -1439,11 +1439,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
		break;
	}
	qp->r_nak_state = 0;
	spin_unlock_irq(&qp->s_lock);
	tasklet_hi_schedule(&qp->s_task);

unlock_done:
	spin_unlock_irq(&qp->s_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

@@ -1453,10 +1452,12 @@ send_ack:

static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
	spin_lock_irq(&qp->s_lock);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	ipath_error_qp(qp, err);
	spin_unlock_irq(&qp->s_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
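Several of the hunks above replace spin_lock_irq()/spin_unlock_irq() with the irqsave variants, apparently because these paths can now be reached from contexts where interrupts may already be disabled; unconditionally re-enabling interrupts on unlock would be wrong there. A minimal sketch of the pattern, with a made-up context structure just to be self-contained:

	/* Hypothetical context, only so the sketch compiles on its own. */
	struct my_ctx {
		spinlock_t lock;
		int counter;
	};

	static void touch_shared_state(struct my_ctx *ctx)
	{
		unsigned long flags;

		/* Save the caller's IRQ state instead of assuming IRQs are on ... */
		spin_lock_irqsave(&ctx->lock, flags);
		ctx->counter++;
		/* ... and restore exactly that state, so a caller that already runs
		 * with IRQs disabled keeps them disabled after we return. */
		spin_unlock_irqrestore(&ctx->lock, flags);
	}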

@@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) srq->rq.wq;
		int err;
		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_wq;
		}

		/* Allocate info for ipath_mmap(). */
		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
		if (!ip) {
		srq->ip =
		    ipath_create_mmap_info(dev, s,
					   ibpd->uobject->context,
					   srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}
		srq->ip = ip;
		ip->context = ibpd->uobject->context;
		ip->obj = srq->rq.wq;
		kref_init(&ip->ref);
		ip->mmap_cnt = 0;
		ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
				      srq->rq.size * sz);
		spin_lock_irq(&dev->pending_lock);
		ip->next = dev->pending_mmaps;
		dev->pending_mmaps = ip;
		spin_unlock_irq(&dev->pending_lock);

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

@@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_wq;
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);

bail_srq:
	kfree(srq);

done:
	return ret;
}
@@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct ipath_rwq) + size * sz;

			ip->obj = wq;
			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
					      size * sz);
			ipath_update_mmap_info(dev, ip, s, wq);
			spin_lock_irq(&dev->pending_lock);
			ip->next = dev->pending_mmaps;
			dev->pending_mmaps = ip;
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {

@@ -31,8 +31,6 @@
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ipath_kernel.h"

struct infinipath_stats ipath_stats;

@@ -32,7 +32,6 @@
 */

#include <linux/ctype.h>
#include <linux/pci.h>

#include "ipath_kernel.h"
#include "ipath_common.h"

@@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
		ret = -ENOMEM;
		goto err_lk;
	}
	INIT_LIST_HEAD(&idev->pending_mmaps);
	spin_lock_init(&idev->pending_lock);
	idev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&idev->mmap_offset_lock);
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
@@ -1558,6 +1561,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = 1;
	dev->dma_device = &dd->pcidev->dev;
	dev->query_device = ipath_query_device;
	dev->modify_device = ipath_modify_device;

@@ -173,12 +173,12 @@ struct ipath_ah {
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct ipath_mmap_info *next;
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
	unsigned mmap_cnt;
};

/*
@@ -422,7 +422,7 @@ struct ipath_qp {
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08

#define IPATH_PSN_CREDIT	2048
#define IPATH_PSN_CREDIT	512

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
@@ -485,9 +485,10 @@ struct ipath_opcode_stats {

struct ipath_ibdev {
	struct ib_device ibdev;
	struct list_head dev_list;
	struct ipath_devdata *dd;
	struct ipath_mmap_info *pending_mmaps;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
@@ -734,13 +735,13 @@ int ipath_destroy_srq(struct ib_srq *ibsrq);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

@@ -768,6 +769,15 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);

@@ -726,11 +726,12 @@ repoll:
	return err == 0 || err == -EAGAIN ? npolled : err;
}

int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
	doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) ==
				   IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT) |
				  to_mcq(cq)->cqn);
@@ -743,7 +744,7 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
	return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 doorbell[2];
@@ -755,7 +756,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  (notify == IB_CQ_SOLICITED ? 1 : 2));
				  ((flags & IB_CQ_SOLICITED_MASK) ==
				   IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

@@ -766,7 +768,7 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28) |
				  (notify == IB_CQ_SOLICITED ?
				  ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT) |
				  cq->cqn);
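Both mthca arm paths fold the request type into a doorbell word: the solicited-only vs. next-completion request code is chosen by masking the flags and then or'd with the CQ number before being written to the HCA. The Tavor case above, unwrapped purely for readability (same identifiers as the hunk, no new behavior implied):

	u32 req = ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) ?
		  MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :	/* only solicited completions */
		  MTHCA_TAVOR_CQ_DB_REQ_NOT;		/* any next completion */

	doorbell[0] = cpu_to_be32(req | to_mcq(cq)->cqn);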

@@ -495,8 +495,8 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev);

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry);
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq);

@@ -38,7 +38,6 @@
#define MTHCA_MEMFREE_H

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>

#define MTHCA_ICM_CHUNK_LEN \

@@ -663,6 +663,7 @@ static int mthca_destroy_qp(struct ib_qp *qp)
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
				     int comp_vector,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
@@ -1292,6 +1293,7 @@ int mthca_register_device(struct mthca_dev *dev)
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors     = 1;
	dev->ib_dev.dma_device           = &dev->pdev->dev;
	dev->ib_dev.query_device         = mthca_query_device;
	dev->ib_dev.query_port           = mthca_query_port;

@@ -701,6 +701,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);