Merge branches 'cxgb4-4.8', 'mlx5-4.8' and 'fw-version' into k.o/for-4.8
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -294,6 +294,25 @@ static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 	return;
 }
 
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+	struct sk_buff *skb;
+	unsigned int i;
+	size_t len;
+
+	len = roundup(sizeof(union cpl_wr_size), 16);
+	for (i = 0; i < size; i++) {
+		skb = alloc_skb(len, GFP_KERNEL);
+		if (!skb)
+			goto fail;
+		skb_queue_tail(ep_skb_list, skb);
+	}
+	return 0;
+fail:
+	skb_queue_purge(ep_skb_list);
+	return -ENOMEM;
+}
+
 static void *alloc_ep(int size, gfp_t gfp)
 {
 	struct c4iw_ep_common *epc;
@@ -384,6 +403,8 @@ void _c4iw_free_ep(struct kref *kref)
 		if (ep->mpa_skb)
 			kfree_skb(ep->mpa_skb);
 	}
+	if (!skb_queue_empty(&ep->com.ep_skb_list))
+		skb_queue_purge(&ep->com.ep_skb_list);
 	kfree(ep);
 }
 
@@ -620,25 +641,27 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 	}
 }
 
-static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
 {
-	unsigned int flowclen = 80;
 	struct fw_flowc_wr *flowc;
+	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	int i;
 	u16 vlan = ep->l2t->vlan;
 	int nparams;
 
+	if (WARN_ON(!skb))
+		return -ENOMEM;
+
 	if (vlan == CPL_L2T_VLAN_NONE)
 		nparams = 8;
 	else
 		nparams = 9;
 
-	skb = get_skb(skb, flowclen, GFP_KERNEL);
-	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+	flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
 
 	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
 					   FW_FLOWC_WR_NPARAMS_V(nparams));
-	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
 					  16)) | FW_WR_FLOWID_V(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
@@ -679,18 +702,16 @@ static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
 }
 
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
 {
 	struct cpl_close_con_req *req;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	int wrlen = roundup(sizeof *req, 16);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb = get_skb(NULL, wrlen, gfp);
-	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+	if (WARN_ON(!skb))
 		return -ENOMEM;
-	}
+
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
@@ -701,26 +722,24 @@ static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+static int send_abort(struct c4iw_ep *ep)
 {
 	struct cpl_abort_req *req;
 	int wrlen = roundup(sizeof *req, 16);
+	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb = get_skb(skb, wrlen, gfp);
-	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __func__);
+
+	if (WARN_ON(!req_skb))
 		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
-	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+
+	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
+	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
 	req->cmd = CPL_ABORT_SEND_RST;
-	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
 }
 
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
@@ -992,9 +1011,19 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 
 	mpa = (struct mpa_message *)(req + 1);
 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
-	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
-		     (markers_enabled ? MPA_MARKERS : 0) |
-		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
+
+	mpa->flags = 0;
+	if (crc_enabled)
+		mpa->flags |= MPA_CRC;
+	if (markers_enabled) {
+		mpa->flags |= MPA_MARKERS;
+		ep->mpa_attr.recv_marker_enabled = 1;
+	} else {
+		ep->mpa_attr.recv_marker_enabled = 0;
+	}
+	if (mpa_rev_to_use == 2)
+		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+
 	mpa->private_data_size = htons(ep->plen);
 	mpa->revision = mpa_rev_to_use;
 	if (mpa_rev_to_use == 1) {
@@ -1169,8 +1198,11 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	mpa = (struct mpa_message *)(req + 1);
 	memset(mpa, 0, sizeof(*mpa));
 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
-	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
-		     (markers_enabled ? MPA_MARKERS : 0);
+	mpa->flags = 0;
+	if (ep->mpa_attr.crc_enabled)
+		mpa->flags |= MPA_CRC;
+	if (ep->mpa_attr.recv_marker_enabled)
+		mpa->flags |= MPA_MARKERS;
 	mpa->revision = ep->mpa_attr.version;
 	mpa->private_data_size = htons(plen);
 
@@ -1248,7 +1280,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_bit(ACT_ESTAB, &ep->com.history);
 
 	/* start MPA negotiation */
-	ret = send_flowc(ep, NULL);
+	ret = send_flowc(ep);
 	if (ret)
 		goto err;
 	if (ep->retry_with_mpa_v1)
@@ -1555,7 +1587,6 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 */
 	__state_set(&ep->com, FPDU_MODE);
 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
-	ep->mpa_attr.recv_marker_enabled = markers_enabled;
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa->revision;
 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
@@ -2004,12 +2035,17 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 }
 
 /*
  * Return whether a failed active open has allocated a TID
+ * Some of the error codes above implicitly indicate that there is no TID
+ * allocated with the result of an ACT_OPEN. We use this predicate to make
+ * that explicit.
  */
 static inline int act_open_has_tid(int status)
 {
-	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
-	       status != CPL_ERR_ARP_MISS;
+	return (status != CPL_ERR_TCAM_PARITY &&
+		status != CPL_ERR_TCAM_MISS &&
+		status != CPL_ERR_TCAM_FULL &&
+		status != CPL_ERR_CONN_EXIST_SYNRECV &&
+		status != CPL_ERR_CONN_EXIST);
 }
 
 /* Returns whether a CPL status conveys negative advice.
@@ -2130,6 +2166,7 @@ out:
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
 	int err = 0;
+	int size = 0;
 	struct sockaddr_in *laddr = (struct sockaddr_in *)
 				    &ep->com.cm_id->m_local_addr;
 	struct sockaddr_in *raddr = (struct sockaddr_in *)
@@ -2145,6 +2182,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 	init_timer(&ep->timer);
 	c4iw_init_wr_wait(&ep->com.wr_wait);
 
+	/* When MPA revision is different on nodes, the node with MPA_rev=2
+	 * tries to reconnect with MPA_rev 1 for the same EP through
+	 * c4iw_reconnect(), where the same EP is assigned with new tid for
+	 * further connection establishment. As we are using the same EP pointer
+	 * for reconnect, few skbs are used during the previous c4iw_connect(),
+	 * which leaves the EP with inadequate skbs for further
+	 * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+	 * skb_list() during peer_abort(). Allocate skbs which is already used.
+	 */
+	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
 	/*
 	 * Allocate an active TID to initiate a TCP connection.
 	 */
@@ -2210,6 +2262,7 @@ fail2:
 	 * response of 1st connect request.
 	 */
 	connect_reply_upcall(ep, -ECONNRESET);
+fail1:
 	c4iw_put_ep(&ep->com);
 out:
 	return err;
@@ -2576,6 +2629,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
 		child_ep->mtu = peer_mss + hdrs;
 
+	skb_queue_head_init(&child_ep->com.ep_skb_list);
+	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+		goto fail;
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -2640,6 +2697,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
 	}
 	goto out;
+fail:
+	c4iw_put_ep(&child_ep->com);
 reject:
 	reject_cr(dev, hwtid, skb);
 	if (parent_ep)
@@ -2670,7 +2729,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep->com.state = MPA_REQ_WAIT;
 	start_ep_timer(ep);
 	set_bit(PASS_ESTAB, &ep->com.history);
-	ret = send_flowc(ep, skb);
+	ret = send_flowc(ep);
 	mutex_unlock(&ep->com.mutex);
 	if (ret)
 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
@@ -2871,10 +2930,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	mutex_unlock(&ep->com.mutex);
 
-	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
-	if (!rpl_skb) {
-		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-		       __func__);
+	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+	if (WARN_ON(!rpl_skb)) {
 		release = 1;
 		goto out;
 	}
@@ -3025,9 +3082,9 @@ out:
 
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
-	int err = 0;
-	int disconnect = 0;
+	int abort;
 	struct c4iw_ep *ep = to_ep(cm_id);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	mutex_lock(&ep->com.mutex);
@@ -3038,16 +3095,13 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 	}
 	set_bit(ULP_REJECT, &ep->com.history);
 	if (mpa_rev == 0)
-		disconnect = 2;
-	else {
-		err = send_mpa_reject(ep, pdata, pdata_len);
-		disconnect = 1;
-	}
+		abort = 1;
+	else
+		abort = send_mpa_reject(ep, pdata, pdata_len);
 	mutex_unlock(&ep->com.mutex);
-	if (disconnect) {
-		stop_ep_timer(ep);
-		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
-	}
+
+	stop_ep_timer(ep);
+	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
 	c4iw_put_ep(&ep->com);
 	return 0;
 }
@@ -3248,6 +3302,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -ENOMEM;
 		goto out;
 	}
+
+	skb_queue_head_init(&ep->com.ep_skb_list);
+	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
 	init_timer(&ep->timer);
 	ep->plen = conn_param->private_data_len;
 	if (ep->plen)
@@ -3266,7 +3327,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (!ep->com.qp) {
 		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
 		err = -EINVAL;
-		goto fail1;
+		goto fail2;
 	}
 	ref_qp(ep);
 	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
@@ -3279,7 +3340,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ep->atid == -1) {
 		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
-		goto fail1;
+		goto fail2;
 	}
 	insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
@@ -3303,7 +3364,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 			err = pick_local_ipaddrs(dev, cm_id);
 			if (err)
-				goto fail1;
+				goto fail2;
 		}
 
 		/* find a route */
@@ -3323,7 +3384,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
 			err = pick_local_ip6addrs(dev, cm_id);
 			if (err)
-				goto fail1;
+				goto fail2;
 		}
 
 		/* find a route */
@@ -3339,14 +3400,14 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (!ep->dst) {
 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
 		err = -EHOSTUNREACH;
-		goto fail2;
+		goto fail3;
 	}
 
 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
 			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
 	if (err) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		goto fail3;
+		goto fail4;
 	}
 
 	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
@@ -3362,13 +3423,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	goto out;
 
 	cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
 	dst_release(ep->dst);
-fail2:
+fail3:
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail1:
+fail2:
+	skb_queue_purge(&ep->com.ep_skb_list);
 	deref_cm_id(&ep->com);
+fail1:
 	c4iw_put_ep(&ep->com);
 out:
 	return err;
@@ -3461,6 +3524,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		err = -ENOMEM;
 		goto fail1;
 	}
+	skb_queue_head_init(&ep->com.ep_skb_list);
 	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.cm_id = cm_id;
 	ref_cm_id(&ep->com);
@@ -3577,6 +3641,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	case MPA_REQ_RCVD:
 	case MPA_REP_SENT:
 	case FPDU_MODE:
+	case CONNECTING:
 		close = 1;
 		if (abrupt)
 			ep->com.state = ABORTING;
@@ -3611,10 +3676,10 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		if (abrupt) {
 			set_bit(EP_DISC_ABORT, &ep->com.history);
 			close_complete_upcall(ep, -ECONNRESET);
-			ret = send_abort(ep, NULL, gfp);
+			ret = send_abort(ep);
 		} else {
 			set_bit(EP_DISC_CLOSE, &ep->com.history);
-			ret = send_halfclose(ep, gfp);
+			ret = send_halfclose(ep);
 		}
 		if (ret) {
 			set_bit(EP_DISC_FAIL, &ep->com.history);
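Every cm.c change above follows one pattern: the work requests needed to tear a
connection down (abort, close, flowc) are no longer allocated at send time,
where an -ENOMEM would strand the endpoint, but are drawn from a per-endpoint
list filled at connect/accept time. A minimal sketch of that reserve-then-consume
idiom, using only kernel sk_buff APIs (the names below are illustrative, not
driver code):

	#include <linux/skbuff.h>

	#define TEARDOWN_BUFS	5	/* mirrors CN_MAX_CON_BUF */

	static int reserve_teardown_bufs(struct sk_buff_head *list, size_t len)
	{
		int i;

		for (i = 0; i < TEARDOWN_BUFS; i++) {
			struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

			if (!skb) {
				skb_queue_purge(list);	/* undo partial fill */
				return -ENOMEM;
			}
			skb_queue_tail(list, skb);
		}
		return 0;
	}

	/* Teardown paths dequeue instead of allocating; an empty list is a
	 * driver accounting bug, hence the WARN_ON in the hunks above. */
	static struct sk_buff *get_teardown_buf(struct sk_buff_head *list)
	{
		struct sk_buff *skb = skb_dequeue(list);

		WARN_ON(!skb);
		return skb;
	}

This is also why c4iw_reconnect() only tops the list back up to CN_MAX_CON_BUF
rather than allocating a fresh set: the same endpoint object is reused and may
still hold unconsumed buffers from the first connect attempt.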
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,19 +33,15 @@
 #include "iw_cxgb4.h"
 
 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		      struct c4iw_dev_ucontext *uctx)
+		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
 	struct c4iw_wr_wait wr_wait;
-	struct sk_buff *skb;
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
@@ -863,7 +859,9 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
 				  : NULL;
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
-		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
+		   chp->destroy_skb);
+	chp->destroy_skb = NULL;
 	kfree(chp);
 	return 0;
 }
@@ -879,7 +877,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	struct c4iw_cq *chp;
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
-	int ret;
+	int ret, wr_len;
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
@@ -896,6 +894,13 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
 
+	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+	if (!chp->destroy_skb) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
 	if (ib_context)
 		ucontext = to_c4iw_ucontext(ib_context);
 
@@ -936,7 +941,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	ret = create_cq(&rhp->rdev, &chp->cq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 	if (ret)
-		goto err1;
+		goto err2;
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
@@ -947,15 +952,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	init_waitqueue_head(&chp->wait);
 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
 	if (ret)
-		goto err2;
+		goto err3;
 
 	if (ucontext) {
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
-			goto err3;
+			goto err4;
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2)
-			goto err4;
+			goto err5;
 
 		uresp.qid_mask = rhp->rdev.cqmask;
 		uresp.cqid = chp->cq.cqid;
@@ -970,7 +975,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		ret = ib_copy_to_udata(udata, &uresp,
 				       sizeof(uresp) - sizeof(uresp.reserved));
 		if (ret)
-			goto err5;
+			goto err6;
 
 		mm->key = uresp.key;
 		mm->addr = virt_to_phys(chp->cq.queue);
@@ -986,15 +991,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	     __func__, chp->cq.cqid, chp, chp->cq.size,
 	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
 	return &chp->ibcq;
-err5:
+err6:
 	kfree(mm2);
-err4:
+err5:
 	kfree(mm);
-err3:
+err4:
 	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err2:
+err3:
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
-		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+		   chp->destroy_skb);
+err2:
+	kfree_skb(chp->destroy_skb);
 err1:
 	kfree(chp);
 	return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -317,7 +317,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
 	spin_unlock_irq(&qpd->devp->lock);
 
-	qpd->bufsize = count * 128;
+	qpd->bufsize = count * 180;
 	qpd->buf = vmalloc(qpd->bufsize);
 	if (!qpd->buf) {
 		kfree(qpd);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -384,6 +384,7 @@ struct c4iw_mr {
 	struct ib_mr ibmr;
 	struct ib_umem *umem;
 	struct c4iw_dev *rhp;
+	struct sk_buff *dereg_skb;
 	u64 kva;
 	struct tpt_attributes attr;
 	u64 *mpl;
@@ -400,6 +401,7 @@ static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
 struct c4iw_mw {
 	struct ib_mw ibmw;
 	struct c4iw_dev *rhp;
+	struct sk_buff *dereg_skb;
 	u64 kva;
 	struct tpt_attributes attr;
 };
@@ -412,6 +414,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
 struct c4iw_cq {
 	struct ib_cq ibcq;
 	struct c4iw_dev *rhp;
+	struct sk_buff *destroy_skb;
 	struct t4_cq cq;
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;
@@ -789,10 +792,29 @@ enum c4iw_ep_history {
 	CM_ID_DEREFED = 28,
 };
 
+enum conn_pre_alloc_buffers {
+	CN_ABORT_REQ_BUF,
+	CN_ABORT_RPL_BUF,
+	CN_CLOSE_CON_REQ_BUF,
+	CN_DESTROY_BUF,
+	CN_FLOWC_BUF,
+	CN_MAX_CON_BUF
+};
+
+#define FLOWC_LEN 80
+union cpl_wr_size {
+	struct cpl_abort_req abrt_req;
+	struct cpl_abort_rpl abrt_rpl;
+	struct fw_ri_wr ri_req;
+	struct cpl_close_con_req close_req;
+	char flowc_buf[FLOWC_LEN];
+};
+
 struct c4iw_ep_common {
 	struct iw_cm_id *cm_id;
 	struct c4iw_qp *qp;
 	struct c4iw_dev *dev;
+	struct sk_buff_head ep_skb_list;
 	enum c4iw_ep_state state;
 	struct kref kref;
 	struct mutex mutex;
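The size of each pre-allocated buffer falls out of union cpl_wr_size: the union
is exactly as large as the largest teardown work request it may carry, so a
single roundup covers every consumer. A sketch of the sizing logic, matching
the alloc_ep_skb_list() hunk in cm.c (illustrative only):

	#include <linux/kernel.h>

	/* Length of each reserved skb: the largest teardown WR,
	 * rounded up to the 16-byte work-request granularity. */
	static inline size_t conn_buf_len(void)
	{
		return roundup(sizeof(union cpl_wr_size), 16);
	}

If a new CPL/WR type is ever added to the teardown paths, adding its struct to
the union (and, if needed, a new CN_*_BUF slot) keeps the reserved buffers
large and numerous enough by construction.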
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,9 +59,9 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
 }
 
 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
-				       u32 len, dma_addr_t data, int wait)
+				       u32 len, dma_addr_t data,
+				       int wait, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
 	struct ulp_mem_io *req;
 	struct ulptx_sgl *sgl;
 	u8 wr_len;
@@ -74,9 +74,11 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	c4iw_init_wr_wait(&wr_wait);
 	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
 
-	skb = alloc_skb(wr_len, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+		if (!skb)
+			return -ENOMEM;
+	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -108,9 +110,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 }
 
 static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
-				  void *data)
+				  void *data, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
 	struct ulp_mem_io *req;
 	struct ulptx_idata *sc;
 	u8 wr_len, *to_dp, *from_dp;
@@ -134,9 +135,11 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		wr_len = roundup(sizeof *req + sizeof *sc +
 				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-		skb = alloc_skb(wr_len, GFP_KERNEL);
-		if (!skb)
-			return -ENOMEM;
+		if (!skb) {
+			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+			if (!skb)
+				return -ENOMEM;
+		}
 		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -173,6 +176,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
 			       (copy_len % T4_ULPTX_MIN_IO));
 		ret = c4iw_ofld_send(rdev, skb);
+		skb = NULL;
 		if (ret)
 			return ret;
 		len -= C4IW_MAX_INLINE_SIZE;
@@ -182,7 +186,8 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
-static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
+			       void *data, struct sk_buff *skb)
 {
 	u32 remain = len;
 	u32 dmalen;
@@ -205,7 +210,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
 			dmalen = T4_ULPTX_MAX_DMA;
 		remain -= dmalen;
 		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
-						  !remain);
+						  !remain, skb);
 		if (ret)
 			goto out;
 		addr += dmalen >> 5;
@@ -213,7 +218,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
 		daddr += dmalen;
 	}
 	if (remain)
-		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
 out:
 	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
 	return ret;
@@ -224,23 +229,25 @@ out:
  * If data is NULL, clear len byte of memory to zero.
  */
 static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
-			     void *data)
+			     void *data, struct sk_buff *skb)
 {
 	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
 		if (len > inline_threshold) {
-			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+			if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
 				printk_ratelimited(KERN_WARNING
 						   "%s: dma map"
 						   " failure (non fatal)\n",
 						   pci_name(rdev->lldi.pdev));
 				return _c4iw_write_mem_inline(rdev, addr, len,
-							      data);
-			} else
+							      data, skb);
+			} else {
 				return 0;
+			}
 		} else
-			return _c4iw_write_mem_inline(rdev, addr, len, data);
+			return _c4iw_write_mem_inline(rdev, addr,
+						      len, data, skb);
 	} else
-		return _c4iw_write_mem_inline(rdev, addr, len, data);
+		return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
 }
 
 /*
@@ -253,7 +260,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			   u32 *stag, u8 stag_state, u32 pdid,
 			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
 			   int bind_enabled, u32 zbva, u64 to,
-			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
+			   struct sk_buff *skb)
 {
 	int err;
 	struct fw_ri_tpte tpt;
@@ -307,7 +315,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 	}
 	err = write_adapter_mem(rdev, stag_idx +
 				(rdev->lldi.vr->stag.start >> 5),
-				sizeof(tpt), &tpt);
+				sizeof(tpt), &tpt, skb);
 
 	if (reset_tpt_entry) {
 		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -327,28 +335,29 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
 	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
 	     pbl_size);
 
-	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
 	return err;
 }
 
 static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
-		     u32 pbl_addr)
+		     u32 pbl_addr, struct sk_buff *skb)
 {
 	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
-			       pbl_size, pbl_addr);
+			       pbl_size, pbl_addr, skb);
 }
 
 static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
 {
 	*stag = T4_STAG_UNSET;
 	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
-			       0UL, 0, 0, 0, 0);
+			       0UL, 0, 0, 0, 0, NULL);
 }
 
-static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
+			     struct sk_buff *skb)
 {
 	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
-			       0);
+			       0, skb);
 }
 
 static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
@@ -356,7 +365,7 @@ static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
 {
 	*stag = T4_STAG_UNSET;
 	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
-			       0UL, 0, 0, pbl_size, pbl_addr);
+			       0UL, 0, 0, pbl_size, pbl_addr, NULL);
 }
 
 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -383,14 +392,16 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
 			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
 			      mhp->attr.va_fbo, mhp->attr.len ?
 			      mhp->attr.len : -1, shift - 12,
-			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
+			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
 	if (ret)
 		return ret;
 
 	ret = finish_mem_reg(mhp, stag);
-	if (ret)
+	if (ret) {
 		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-			  mhp->attr.pbl_addr);
+			  mhp->attr.pbl_addr, mhp->dereg_skb);
+		mhp->dereg_skb = NULL;
+	}
 	return ret;
 }
 
@@ -423,6 +434,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+	if (!mhp->dereg_skb) {
+		ret = -ENOMEM;
+		goto err0;
+	}
+
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
@@ -435,7 +452,8 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 
 	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
 			      FW_RI_STAG_NSMR, mhp->attr.perms,
-			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
+			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
+			      NULL);
 	if (ret)
 		goto err1;
 
@@ -445,8 +463,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	return &mhp->ibmr;
 err2:
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr);
+		  mhp->attr.pbl_addr, mhp->dereg_skb);
 err1:
+	kfree_skb(mhp->dereg_skb);
+err0:
 	kfree(mhp);
 	return ERR_PTR(ret);
 }
@@ -481,11 +501,18 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+	if (!mhp->dereg_skb) {
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mhp->rhp = rhp;
 
 	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(mhp->umem)) {
 		err = PTR_ERR(mhp->umem);
+		kfree_skb(mhp->dereg_skb);
 		kfree(mhp);
 		return ERR_PTR(err);
 	}
@@ -550,6 +577,7 @@ err_pbl:
 
 err:
 	ib_umem_release(mhp->umem);
+	kfree_skb(mhp->dereg_skb);
 	kfree(mhp);
 	return ERR_PTR(err);
 }
@@ -572,8 +600,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
+
+	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+	if (!mhp->dereg_skb) {
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
 	if (ret) {
+		kfree(mhp->dereg_skb);
 		kfree(mhp);
 		return ERR_PTR(ret);
 	}
@@ -584,7 +620,8 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	mmid = (stag) >> 8;
 	mhp->ibmw.rkey = stag;
 	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
-		deallocate_window(&rhp->rdev, mhp->attr.stag);
+		deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+		kfree(mhp->dereg_skb);
 		kfree(mhp);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -602,7 +639,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
 	remove_handle(rhp, &rhp->mmidr, mmid);
-	deallocate_window(&rhp->rdev, mhp->attr.stag);
+	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
 	kfree(mhp);
 	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
@@ -666,7 +703,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	return &(mhp->ibmr);
 err3:
 	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr);
+		  mhp->attr.pbl_addr, mhp->dereg_skb);
 err2:
 	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 			  mhp->attr.pbl_size << 3);
@@ -717,7 +754,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
 				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr);
+		  mhp->attr.pbl_addr, mhp->dereg_skb);
 	if (mhp->attr.pbl_size)
 		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 				  mhp->attr.pbl_size << 3);
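The mem.c refactoring threads an optional caller-supplied skb down to the point
where the ULP_TX work request is built: destroy-time callers pass the dereg_skb
they reserved when the object was created, while callers that are allowed to
fail pass NULL and keep the old allocate-on-demand behaviour. A hedged sketch
of the idiom (my_dev and post_to_hw are hypothetical stand-ins, not driver
names):

	static int send_wr(struct my_dev *dev, size_t wr_len,
			   struct sk_buff *skb)
	{
		if (!skb) {
			/* fallible path: allocate on demand */
			skb = alloc_skb(wr_len, GFP_KERNEL);
			if (!skb)
				return -ENOMEM;
		}
		/* ... build the work request in skb ... */
		return post_to_hw(dev, skb);	/* hypothetical helper */
	}

Note the "skb = NULL;" added after c4iw_ofld_send() in _c4iw_write_mem_inline():
the loop may post several work requests, but the caller's reserved buffer can
only be consumed once, so every later iteration falls back to allocation.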
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -409,20 +409,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 		       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }
 
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
-						 ibdev.dev);
-	PDBG("%s dev 0x%p\n", __func__, dev);
-
-	return sprintf(buf, "%u.%u.%u.%u\n",
-		       FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
-		       FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
-		       FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
-		       FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
-}
-
 static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
@@ -502,13 +488,11 @@ static int c4iw_get_mib(struct ib_device *ibdev,
 }
 
 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
 
 static struct device_attribute *c4iw_class_attributes[] = {
 	&dev_attr_hw_rev,
-	&dev_attr_fw_ver,
 	&dev_attr_hca_type,
 	&dev_attr_board_id,
 };
@@ -530,6 +514,20 @@ static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
 	return 0;
 }
 
+static void get_dev_fw_str(struct ib_device *dev, char *str,
+			   size_t str_len)
+{
+	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+						 ibdev);
+	PDBG("%s dev 0x%p\n", __func__, dev);
+
+	snprintf(str, str_len, "%u.%u.%u.%u",
+		 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+		 FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+		 FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+		 FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
+}
+
 int c4iw_register_device(struct c4iw_dev *dev)
 {
 	int ret;
@@ -605,6 +603,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.get_hw_stats = c4iw_get_mib;
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
+	dev->ibdev.get_dev_fw_str = get_dev_fw_str;
 	dev->ibdev.drain_sq = c4iw_drain_sq;
 	dev->ibdev.drain_rq = c4iw_drain_rq;
 
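The provider.c hunks are the 'fw-version' branch: the firmware version string
moves out of a driver-private fw_ver sysfs attribute and into the ib core's
get_dev_fw_str() callback, so the core can expose the same string uniformly
for every device. A sketch of a consumer, assuming only the callback field
registered in this diff (the surrounding function is illustrative):

	static void log_fw_version(struct ib_device *ibdev)
	{
		char fw[64];

		fw[0] = '\0';
		if (ibdev->get_dev_fw_str)
			ibdev->get_dev_fw_str(ibdev, fw, sizeof(fw));
		pr_info("%s: firmware %s\n", ibdev->name, fw);
	}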
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1081,9 +1081,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, gfp);
-	if (!skb)
+	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
+	if (WARN_ON(!skb))
 		return;
+
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1202,9 +1203,10 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
-	if (!skb)
+	skb = skb_dequeue(&ep->com.ep_skb_list);
+	if (WARN_ON(!skb))
 		return -ENOMEM;
+
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));