Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "This has been a smaller cycle than normal. One new driver was
  accepted, which is unusual, and at least one more driver remains in
  review on the list.

  Summary:

   - Driver fixes for hns, hfi1, nes, rxe, i40iw, mlx5, cxgb4,
     vmw_pvrdma

   - Many patches from Matthew Wilcox converting radix tree and IDR users to
     use xarray

   - Introduction of tracepoints to the MAD layer

   - Build large SGLs at the start for DMA mapping and get the driver to
     split them

   - Generally clean SGL handling code throughout the subsystem

   - Support for restricting RDMA devices to net namespaces for
     containers

   - Progress to remove object allocation boilerplate code from drivers

   - Change in how the mlx5 driver shows representor ports linked to VFs

   - mlx5 uapi feature to access the on chip SW ICM memory

   - Add a new driver for 'EFA'. This is HW that supports user space
     packet processing through QPs in Amazon's cloud"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (186 commits)
  RDMA/ipoib: Allow user space differentiate between valid dev_port
  IB/core, ipoib: Do not overreact to SM LID change event
  RDMA/device: Don't fire uevent before device is fully initialized
  lib/scatterlist: Remove leftover from sg_page_iter comment
  RDMA/efa: Add driver to Kconfig/Makefile
  RDMA/efa: Add the efa module
  RDMA/efa: Add EFA verbs implementation
  RDMA/efa: Add common command handlers
  RDMA/efa: Implement functions that submit and complete admin commands
  RDMA/efa: Add the ABI definitions
  RDMA/efa: Add the com service API definitions
  RDMA/efa: Add the efa_com.h file
  RDMA/efa: Add the efa.h header file
  RDMA/efa: Add EFA device definitions
  RDMA: Add EFA related definitions
  RDMA/umem: Remove hugetlb flag
  RDMA/bnxt_re: Use core helpers to get aligned DMA address
  RDMA/i40iw: Use core helpers to get aligned DMA address within a supported page size
  RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks
  RDMA/umem: Add API to find best driver supported page size in an MR
  ...
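
The two umem entries just above are the core of the "build large SGLs and let the driver split them" work: a driver asks the core for the best page size its hardware supports for an MR, then walks the umem in aligned, physically contiguous blocks of that size. A minimal sketch of how a driver might combine the two helpers (the function name, the SZ_4K | SZ_2M capability mask, and the pbl output array are illustrative assumptions, not code from this merge):

#include <linux/sizes.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver helper: fill a page-buffer list with one DMA
 * address per block, using the largest supported page size that fits
 * the MR's layout.
 */
static int fill_pbl(struct ib_umem *umem, u64 virt, u64 *pbl)
{
	struct ib_block_iter biter;
	unsigned long pg_sz;

	/* Assumed HW capability mask: 4K and 2M pages */
	pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt);
	if (!pg_sz)
		return -EINVAL;	/* no supported page size fits */

	/* Visit the umem as pg_sz-aligned, contiguous DMA blocks */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
		*pbl++ = rdma_block_iter_dma_address(&biter);

	return 0;
}
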
This commit is contained in:
Linus Torvalds
2019-05-09 09:02:46 -07:00
Commit dce45af5c2
251 changed files with 12,549 additions and 4,585 deletions


@@ -82,7 +82,7 @@ static void rxe_send_complete(unsigned long data)
 }
 
 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
-		     int comp_vector, struct ib_ucontext *context,
+		     int comp_vector, struct ib_udata *udata,
 		     struct rxe_create_cq_resp __user *uresp)
 {
 	int err;
@@ -94,7 +94,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 		return -ENOMEM;
 	}
 
-	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
+	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
 			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
 	if (err) {
 		vfree(cq->queue->buf);
@@ -115,13 +115,13 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 }
 
 int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
-			struct rxe_resize_cq_resp __user *uresp)
+			struct rxe_resize_cq_resp __user *uresp,
+			struct ib_udata *udata)
 {
 	int err;
 
 	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
-			       sizeof(struct rxe_cqe),
-			       cq->queue->ip ? cq->queue->ip->context : NULL,
+			       sizeof(struct rxe_cqe), udata,
 			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
 	if (!err)
 		cq->ibcq.cqe = cqe;


@@ -643,7 +643,7 @@ struct rxe_atmeth {
 	__be32			rkey;
 	__be64			swap_add;
 	__be64			comp;
-} __attribute__((__packed__));
+} __packed;
 
 static inline u64 __atmeth_va(void *arg)
 {


@@ -53,11 +53,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
 		    int cqe, int comp_vector);
 
 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
-		     int comp_vector, struct ib_ucontext *context,
+		     int comp_vector, struct ib_udata *udata,
 		     struct rxe_create_cq_resp __user *uresp);
 
 int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
-			struct rxe_resize_cq_resp __user *uresp);
+			struct rxe_resize_cq_resp __user *uresp,
+			struct ib_udata *udata);
 
 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
@@ -91,10 +92,8 @@ struct rxe_mmap_info {
 
 void rxe_mmap_release(struct kref *ref);
 
-struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
-					   u32 size,
-					   struct ib_ucontext *context,
-					   void *obj);
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
+					   struct ib_udata *udata, void *obj);
 
 int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
@@ -224,13 +223,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
 
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
-		      struct ib_srq_init_attr *init,
-		      struct ib_ucontext *context,
+		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp);
 
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
-		      struct rxe_modify_srq_cmd *ucmd);
+		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
 
 void rxe_dealloc(struct ib_device *ib_dev);


@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <asm/pgtable.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "rxe.h"
 #include "rxe_loc.h"
@@ -140,13 +141,14 @@ done:
 /*
  * Allocate information for rxe_mmap
  */
-struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
-					   u32 size,
-					   struct ib_ucontext *context,
-					   void *obj)
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
+					   struct ib_udata *udata, void *obj)
 {
 	struct rxe_mmap_info *ip;
 
+	if (!udata)
+		return ERR_PTR(-EINVAL);
+
 	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
 	if (!ip)
 		return NULL;
@@ -165,7 +167,9 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
 	INIT_LIST_HEAD(&ip->pending_mmaps);
 
 	ip->info.size = size;
-	ip->context = context;
+	ip->context =
+		container_of(udata, struct uverbs_attr_bundle, driver_udata)
+			->context;
 	ip->obj = obj;
 
 	kref_init(&ip->ref);
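
The rxe_create_mmap_info() change threads udata down so the mmap bookkeeping can still reach the ucontext. The container_of() works because the core hands drivers a udata embedded in a uverbs_attr_bundle; it is the open-coded form of the rdma_udata_to_drv_context() helper seen elsewhere in this commit. A minimal sketch of the helper-based equivalent (the wrapper name is illustrative):

#include <rdma/uverbs_ioctl.h>

/* Illustrative wrapper: recover rxe's ucontext from a udata; yields
 * NULL when the call came from a kernel consumer (udata == NULL).
 */
static struct rxe_ucontext *rxe_uc_from_udata(struct ib_udata *udata)
{
	return rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
}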


@@ -179,7 +179,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 	}
 
 	mem->umem = umem;
-	num_buf = umem->nmap;
+	num_buf = ib_umem_num_pages(umem);
 
 	rxe_mem_init(access, mem);
@@ -199,6 +199,12 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 		buf = map[0]->buf;
 
 		for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+			if (num_buf >= RXE_BUF_PER_MAP) {
+				map++;
+				buf = map[0]->buf;
+				num_buf = 0;
+			}
+
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
 				pr_warn("null vaddr\n");
@@ -211,11 +217,6 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 			num_buf++;
 			buf++;
 
-			if (num_buf >= RXE_BUF_PER_MAP) {
-				map++;
-				buf = map[0]->buf;
-				num_buf = 0;
-			}
 		}
 	}


@@ -338,13 +338,13 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
 	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
 }
 
-static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
-		    struct rxe_av *av)
+static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 {
 	struct rxe_qp *qp = pkt->qp;
 	struct dst_entry *dst;
 	bool xnet = false;
 	__be16 df = htons(IP_DF);
+	struct rxe_av *av = rxe_get_av(pkt);
 	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
 	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
@@ -364,11 +364,11 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
 	return 0;
 }
 
-static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
-		    struct rxe_av *av)
+static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 {
 	struct rxe_qp *qp = pkt->qp;
 	struct dst_entry *dst;
+	struct rxe_av *av = rxe_get_av(pkt);
 	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
 	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
@@ -392,16 +392,15 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
 int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
 {
 	int err = 0;
-	struct rxe_av *av = rxe_get_av(pkt);
 
-	if (av->network_type == RDMA_NETWORK_IPV4)
-		err = prepare4(pkt, skb, av);
-	else if (av->network_type == RDMA_NETWORK_IPV6)
-		err = prepare6(pkt, skb, av);
+	if (skb->protocol == htons(ETH_P_IP))
+		err = prepare4(pkt, skb);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		err = prepare6(pkt, skb);
 
 	*crc = rxe_icrc_hdr(pkt, skb);
 
-	if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
+	if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
 		pkt->mask |= RXE_LOOPBACK_MASK;
 
 	return err;
@@ -422,23 +421,20 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 {
-	struct rxe_av *av;
 	int err;
 
-	av = rxe_get_av(pkt);
-
 	skb->destructor = rxe_skb_tx_dtor;
 	skb->sk = pkt->qp->sk->sk;
 
 	rxe_add_ref(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 
-	if (av->network_type == RDMA_NETWORK_IPV4) {
+	if (skb->protocol == htons(ETH_P_IP)) {
 		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
-	} else if (av->network_type == RDMA_NETWORK_IPV6) {
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
 		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 	} else {
-		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
+		pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
 		atomic_dec(&pkt->qp->skb_out);
 		rxe_drop_ref(pkt->qp);
 		kfree_skb(skb);
@@ -462,7 +458,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 				int paylen, struct rxe_pkt_info *pkt)
 {
 	unsigned int hdr_len;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct net_device *ndev;
 	const struct ib_gid_attr *attr;
 	const int port_num = 1;
@@ -470,7 +466,6 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
 	if (IS_ERR(attr))
 		return NULL;
-	ndev = attr->ndev;
 
 	if (av->network_type == RDMA_NETWORK_IPV4)
 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
@@ -479,15 +474,26 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
 			sizeof(struct ipv6hdr);
 
+	rcu_read_lock();
+	ndev = rdma_read_gid_attr_ndev_rcu(attr);
+	if (IS_ERR(ndev)) {
+		rcu_read_unlock();
+		goto out;
+	}
 	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
 			GFP_ATOMIC);
 
-	if (unlikely(!skb))
+	if (unlikely(!skb)) {
+		rcu_read_unlock();
 		goto out;
+	}
 
-	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
 
+	/* FIXME: hold reference to this netdev until life of this skb. */
 	skb->dev	= ndev;
+	rcu_read_unlock();
+
 	if (av->network_type == RDMA_NETWORK_IPV4)
 		skb->protocol = htons(ETH_P_IP);
 	else


@@ -52,12 +52,12 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_AH] = {
 		.name		= "rxe-ah",
 		.size		= sizeof(struct rxe_ah),
-		.flags		= RXE_POOL_ATOMIC,
+		.flags		= RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_SRQ] = {
 		.name		= "rxe-srq",
 		.size		= sizeof(struct rxe_srq),
-		.flags		= RXE_POOL_INDEX,
+		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_SRQ_INDEX,
 		.max_index	= RXE_MAX_SRQ_INDEX,
 	},
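
RXE_POOL_NO_ALLOC marks object types whose memory the pool no longer owns: the core allocates the containing structure (sized by the INIT_RDMA_OBJ_SIZE() entries added to rxe_dev_ops later in this commit), and the driver only registers the embedded pool element. The caller-side shape of the change, paraphrased from the rxe_create_srq() hunk further down:

	/* Before: the pool kmalloc'd the driver object */
	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	/* After: the core already allocated the object around ibsrq;
	 * the driver just links its embedded entry into the pool.
	 */
	srq = to_rsrq(ibsrq);
	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		return err;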


@@ -217,8 +217,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
 }
 
 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
-			   struct ib_qp_init_attr *init,
-			   struct ib_ucontext *context,
+			   struct ib_qp_init_attr *init, struct ib_udata *udata,
 			   struct rxe_create_qp_resp __user *uresp)
 {
 	int err;
@@ -254,7 +253,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	if (!qp->sq.queue)
 		return -ENOMEM;
 
-	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
+	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
 			   qp->sq.queue->buf, qp->sq.queue->buf_size,
 			   &qp->sq.queue->ip);
@@ -287,7 +286,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 			    struct ib_qp_init_attr *init,
-			    struct ib_ucontext *context,
+			    struct ib_udata *udata,
 			    struct rxe_create_qp_resp __user *uresp)
 {
 	int err;
@@ -308,7 +307,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 	if (!qp->rq.queue)
 		return -ENOMEM;
 
-	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
+	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
 			   qp->rq.queue->buf, qp->rq.queue->buf_size,
 			   &qp->rq.queue->ip);
 	if (err) {
@@ -344,8 +343,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	struct rxe_cq *rcq = to_rcq(init->recv_cq);
 	struct rxe_cq *scq = to_rcq(init->send_cq);
 	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
-	struct rxe_ucontext *ucontext =
-		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
 
 	rxe_add_ref(pd);
 	rxe_add_ref(rcq);
@@ -360,11 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 
 	rxe_qp_init_misc(rxe, qp, init);
 
-	err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp);
+	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
 	if (err)
 		goto err1;
 
-	err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp);
+	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
 	if (err)
 		goto err2;


@@ -36,18 +36,15 @@
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
-int do_mmap_info(struct rxe_dev *rxe,
-		 struct mminfo __user *outbuf,
-		 struct ib_ucontext *context,
-		 struct rxe_queue_buf *buf,
-		 size_t buf_size,
-		 struct rxe_mmap_info **ip_p)
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+		 struct ib_udata *udata, struct rxe_queue_buf *buf,
+		 size_t buf_size, struct rxe_mmap_info **ip_p)
 {
 	int err;
 	struct rxe_mmap_info *ip = NULL;
 
 	if (outbuf) {
-		ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
+		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
 		if (!ip)
 			goto err1;
@@ -153,12 +150,9 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
 	return 0;
 }
 
-int rxe_queue_resize(struct rxe_queue *q,
-		     unsigned int *num_elem_p,
-		     unsigned int elem_size,
-		     struct ib_ucontext *context,
-		     struct mminfo __user *outbuf,
-		     spinlock_t *producer_lock,
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+		     unsigned int elem_size, struct ib_udata *udata,
+		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
 		     spinlock_t *consumer_lock)
 {
 	struct rxe_queue *new_q;
@@ -170,7 +164,7 @@ int rxe_queue_resize(struct rxe_queue *q,
 	if (!new_q)
 		return -ENOMEM;
 
-	err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf,
+	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
 			   new_q->buf_size, &new_q->ip);
 	if (err) {
 		vfree(new_q->buf);


@@ -76,12 +76,9 @@ struct rxe_queue {
 	unsigned int		index_mask;
 };
 
-int do_mmap_info(struct rxe_dev *rxe,
-		 struct mminfo __user *outbuf,
-		 struct ib_ucontext *context,
-		 struct rxe_queue_buf *buf,
-		 size_t buf_size,
-		 struct rxe_mmap_info **ip_p);
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+		 struct ib_udata *udata, struct rxe_queue_buf *buf,
+		 size_t buf_size, struct rxe_mmap_info **ip_p);
 
 void rxe_queue_reset(struct rxe_queue *q);
@@ -89,10 +86,8 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 		 int *num_elem,
 		 unsigned int elem_size);
 
-int rxe_queue_resize(struct rxe_queue *q,
-		     unsigned int *num_elem_p,
-		     unsigned int elem_size,
-		     struct ib_ucontext *context,
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+		     unsigned int elem_size, struct ib_udata *udata,
 		     struct mminfo __user *outbuf,
 		     /* Protect producers while resizing queue */
 		     spinlock_t *producer_lock,


@@ -99,8 +99,7 @@ err1:
 }
 
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
-		      struct ib_srq_init_attr *init,
-		      struct ib_ucontext *context,
+		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp)
 {
 	int err;
@@ -128,7 +127,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	srq->rq.queue = q;
 
-	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
+	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
 			   q->buf_size, &q->ip);
 	if (err) {
 		vfree(q->buf);
@@ -149,7 +148,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
-		      struct rxe_modify_srq_cmd *ucmd)
+		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
 {
 	int err;
 	struct rxe_queue *q = srq->rq.queue;
@@ -163,11 +162,8 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		mi = u64_to_user_ptr(ucmd->mmap_info_addr);
 
 		err = rxe_queue_resize(q, &attr->max_wr,
-				       rcv_wqe_size(srq->rq.max_sge),
-				       srq->rq.queue->ip ?
-				       srq->rq.queue->ip->context :
-				       NULL,
-				       mi, &srq->rq.producer_lock,
+				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
+				       &srq->rq.producer_lock,
 				       &srq->rq.consumer_lock);
 		if (err)
 			goto err2;


@@ -176,8 +176,7 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
 	return 0;
 }
 
-static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-			struct ib_udata *udata)
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
@@ -185,37 +184,31 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
 }
 
-static void rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct rxe_pd *pd = to_rpd(ibpd);
 
 	rxe_drop_ref(pd);
 }
 
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
-				   struct rdma_ah_attr *attr,
-				   u32 flags,
-				   struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
+			 u32 flags, struct ib_udata *udata)
 {
 	int err;
-	struct rxe_dev *rxe = to_rdev(ibpd->device);
-	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_ah *ah;
+	struct rxe_dev *rxe = to_rdev(ibah->device);
+	struct rxe_ah *ah = to_rah(ibah);
 
 	err = rxe_av_chk_attr(rxe, attr);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
-	ah = rxe_alloc(&rxe->ah_pool);
-	if (!ah)
-		return ERR_PTR(-ENOMEM);
-
-	rxe_add_ref(pd);
-	ah->pd = pd;
+	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+	if (err)
+		return err;
 
 	rxe_init_av(attr, &ah->av);
-	return &ah->ibah;
+	return 0;
 }
@@ -242,13 +235,11 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
 	return 0;
 }
 
-static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
 	struct rxe_ah *ah = to_rah(ibah);
 
-	rxe_drop_ref(ah->pd);
 	rxe_drop_ref(ah);
-	return 0;
 }
@@ -298,21 +289,18 @@ err1:
 	return err;
 }
 
-static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
-				     struct ib_srq_init_attr *init,
-				     struct ib_udata *udata)
+static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+			  struct ib_udata *udata)
 {
 	int err;
-	struct rxe_dev *rxe = to_rdev(ibpd->device);
-	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_ucontext *ucontext =
-		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
-	struct rxe_srq *srq;
+	struct rxe_dev *rxe = to_rdev(ibsrq->device);
+	struct rxe_pd *pd = to_rpd(ibsrq->pd);
+	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_create_srq_resp __user *uresp = NULL;
 
 	if (udata) {
 		if (udata->outlen < sizeof(*uresp))
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		uresp = udata->outbuf;
 	}
@@ -320,28 +308,24 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
 	if (err)
 		goto err1;
 
-	srq = rxe_alloc(&rxe->srq_pool);
-	if (!srq) {
-		err = -ENOMEM;
+	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+	if (err)
 		goto err1;
-	}
 
 	rxe_add_index(srq);
 	rxe_add_ref(pd);
 	srq->pd = pd;
 
-	err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
+	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
 	if (err)
 		goto err2;
 
-	return &srq->ibsrq;
+	return 0;
 
 err2:
 	rxe_drop_ref(pd);
 	rxe_drop_index(srq);
 	rxe_drop_ref(srq);
 err1:
-	return ERR_PTR(err);
+	return err;
 }
@@ -366,7 +350,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	if (err)
 		goto err1;
 
-	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
+	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
 	if (err)
 		goto err1;
@@ -389,7 +373,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 	return 0;
 }
 
-static int rxe_destroy_srq(struct ib_srq *ibsrq)
+static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
 	struct rxe_srq *srq = to_rsrq(ibsrq);
@@ -397,10 +381,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq)
 		rxe_queue_cleanup(srq->rq.queue);
 
 	rxe_drop_ref(srq->pd);
 	rxe_drop_index(srq);
 	rxe_drop_ref(srq);
-
-	return 0;
 }
 
 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
@@ -509,7 +490,7 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return 0;
 }
 
-static int rxe_destroy_qp(struct ib_qp *ibqp)
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
@@ -799,7 +780,6 @@ err1:
 static struct ib_cq *rxe_create_cq(struct ib_device *dev,
 				   const struct ib_cq_init_attr *attr,
-				   struct ib_ucontext *context,
 				   struct ib_udata *udata)
 {
 	int err;
@@ -826,8 +806,8 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev,
 		goto err1;
 	}
 
-	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
-			       context, uresp);
+	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
+			       uresp);
 	if (err)
 		goto err2;
@@ -839,7 +819,7 @@ err1:
 	return ERR_PTR(err);
 }
 
-static int rxe_destroy_cq(struct ib_cq *ibcq)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
@@ -866,7 +846,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	if (err)
 		goto err1;
 
-	err = rxe_cq_resize_queue(cq, cqe, uresp);
+	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
 	if (err)
 		goto err1;
@@ -990,7 +970,7 @@ err2:
 	return ERR_PTR(err);
 }
 
-static int rxe_dereg_mr(struct ib_mr *ibmr)
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct rxe_mem *mr = to_rmr(ibmr);
@@ -1001,9 +981,8 @@ static int rxe_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
-static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
-				  enum ib_mr_type mr_type,
-				  u32 max_num_sg)
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+				  u32 max_num_sg, struct ib_udata *udata)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
@@ -1176,7 +1155,10 @@ static const struct ib_device_ops rxe_dev_ops = {
 	.reg_user_mr = rxe_reg_user_mr,
 	.req_notify_cq = rxe_req_notify_cq,
 	.resize_cq = rxe_resize_cq,
+
+	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
 };
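
The INIT_RDMA_OBJ_SIZE() entries are what allow the core to allocate these objects on the driver's behalf. Roughly, per the 5.2-era macro (paraphrased, not copied from this diff), each entry records the driver struct's size and checks at compile time that the ib_* member sits at offset zero, so a pointer to the ib object is also a pointer to the driver object:

	/* INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd) expands to roughly: */
	.size_ib_pd = sizeof(struct rxe_pd) +
		      BUILD_BUG_ON_ZERO(offsetof(struct rxe_pd, ibpd)),

That offset-zero rule is why the final hunks below move ibah and ibsrq ahead of pelem in struct rxe_ah and struct rxe_srq.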


@@ -71,8 +71,8 @@ struct rxe_pd {
 };
 
 struct rxe_ah {
-	struct rxe_pool_entry	pelem;
 	struct ib_ah		ibah;
+	struct rxe_pool_entry	pelem;
 	struct rxe_pd		*pd;
 	struct rxe_av		av;
 };
@@ -120,8 +120,8 @@ struct rxe_rq {
 };
 
 struct rxe_srq {
-	struct rxe_pool_entry	pelem;
 	struct ib_srq		ibsrq;
+	struct rxe_pool_entry	pelem;
 	struct rxe_pd		*pd;
 	struct rxe_rq		rq;
 	u32			srq_num;