Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and
    co. at Google. https://lwn.net/Articles/701165/

 2) Do TCP Small Queues for retransmits, from Eric Dumazet.

 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from
    Alexei Starovoitov.

 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.

 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.

 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.

 7) Support ndo_poll_controller in mlx5, from Calvin Owens.

 8) Move VRF processing to an output hook and allow l3mdev to be
    loopback, from David Ahern.

 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.

10) Congestion control in RXRPC, from David Howells.

11) Support geneve RX offload in ixgbe, from Emil Tantilov.

12) When hitting pressure for new incoming TCP data SKBs, perform a
    partial rather than a full purge of the OFO queue (which could be
    huge). From Eric Dumazet.

13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.

14) Add RX network flow classification to igb, from Gangfeng Huang.

15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.

16) New skbmod packet action, from Jamal Hadi Salim.

17) Remove some inefficiencies in snmp proc output, from Jia He.

18) Add FIB notifications to properly propagate route changes to
    hardware which is doing forwarding offloading. From Jiri Pirko.

19) New dsa driver for qca8xxx chips, from John Crispin.

20) Implement RFC7559 ipv6 router solicitation backoff, from
    Maciej Żenczykowski.

21) Add L3 mode to ipvlan, from Mahesh Bandewar.

22) Support 802.1ad in mlx4, from Moshe Shemesh.

23) Support hardware LRO in mediatek driver, from Nelson Chang.

24) Add TC offloading to mlx5, from Or Gerlitz.

25) Convert various drivers to ethtool ksettings interfaces, from
    Philippe Reynes.

26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.

27) NAPI support for ath10k, from Rajkumar Manoharan.

28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.

29) UDP replicast support in TIPC, from Richard Alpe.

30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.

31) Support BQL in thunderx driver, from Sunil Goutham.

32) TSO support in alx driver, from Tobias Regnery.

33) Add stream parser engine and use it in kcm.

34) Support async DHCP replies in ipconfig module, from Uwe Kleine-König.

35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,7 @@
 config INFINIBAND_CXGB4
 	tristate "Chelsio T4/T5 RDMA Driver"
 	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+	select CHELSIO_LIB
 	select GENERIC_ALLOCATOR
 	---help---
 	  This is an iWARP/RDMA driver for the Chelsio T4 and T5
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,4 +1,5 @@
 ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
 
 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -49,6 +49,7 @@
 
 #include <rdma/ib_addr.h>
 
+#include <libcxgb_cm.h>
 #include "iw_cxgb4.h"
 #include "clip_tbl.h"
 
@@ -239,15 +240,13 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
 
 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 {
-	struct cpl_tid_release *req;
+	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
 
-	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+	skb = get_skb(skb, len, GFP_KERNEL);
 	if (!skb)
 		return;
-	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
-	INIT_TP_WR(req, hwtid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+
+	cxgb_mk_tid_release(skb, len, hwtid, 0);
 	c4iw_ofld_send(rdev, skb);
 	return;
 }
@@ -466,72 +465,6 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
 	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
 }
 
-static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
-{
-	int i;
-
-	egress_dev = get_real_dev(egress_dev);
-	for (i = 0; i < dev->rdev.lldi.nports; i++)
-		if (dev->rdev.lldi.ports[i] == egress_dev)
-			return 1;
-	return 0;
-}
-
-static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
-				     __u8 *peer_ip, __be16 local_port,
-				     __be16 peer_port, u8 tos,
-				     __u32 sin6_scope_id)
-{
-	struct dst_entry *dst = NULL;
-
-	if (IS_ENABLED(CONFIG_IPV6)) {
-		struct flowi6 fl6;
-
-		memset(&fl6, 0, sizeof(fl6));
-		memcpy(&fl6.daddr, peer_ip, 16);
-		memcpy(&fl6.saddr, local_ip, 16);
-		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
-			fl6.flowi6_oif = sin6_scope_id;
-		dst = ip6_route_output(&init_net, NULL, &fl6);
-		if (!dst)
-			goto out;
-		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
-		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
-			dst_release(dst);
-			dst = NULL;
-		}
-	}
-
-out:
-	return dst;
-}
-
-static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
-				    __be32 peer_ip, __be16 local_port,
-				    __be16 peer_port, u8 tos)
-{
-	struct rtable *rt;
-	struct flowi4 fl4;
-	struct neighbour *n;
-
-	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
-				   peer_port, local_port, IPPROTO_TCP,
-				   tos, 0);
-	if (IS_ERR(rt))
-		return NULL;
-	n = dst_neigh_lookup(&rt->dst, &peer_ip);
-	if (!n)
-		return NULL;
-	if (!our_interface(dev, n->dev) &&
-	    !(n->dev->flags & IFF_LOOPBACK)) {
-		neigh_release(n);
-		dst_release(&rt->dst);
-		return NULL;
-	}
-	neigh_release(n);
-	return &rt->dst;
-}
-
 static void arp_failure_discard(void *handle, struct sk_buff *skb)
 {
 	pr_err(MOD "ARP failure\n");
@@ -706,58 +639,34 @@ static int send_flowc(struct c4iw_ep *ep)
 
 static int send_halfclose(struct c4iw_ep *ep)
 {
-	struct cpl_close_con_req *req;
 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
-	int wrlen = roundup(sizeof *req, 16);
+	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (WARN_ON(!skb))
 		return -ENOMEM;
 
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
-	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
-	INIT_TP_WR(req, ep->hwtid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-						    ep->hwtid));
+	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+			      NULL, arp_failure_discard);
 
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
 static int send_abort(struct c4iw_ep *ep)
 {
-	struct cpl_abort_req *req;
-	int wrlen = roundup(sizeof *req, 16);
+	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
 	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (WARN_ON(!req_skb))
 		return -ENOMEM;
 
-	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
-	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
-	memset(req, 0, wrlen);
-	INIT_TP_WR(req, ep->hwtid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
-	req->cmd = CPL_ABORT_SEND_RST;
+	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
+			  ep, abort_arp_failure);
 
 	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
 }
 
-static void best_mtu(const unsigned short *mtus, unsigned short mtu,
-		     unsigned int *idx, int use_ts, int ipv6)
-{
-	unsigned short hdr_size = (ipv6 ?
-				   sizeof(struct ipv6hdr) :
-				   sizeof(struct iphdr)) +
-				  sizeof(struct tcphdr) +
-				  (use_ts ?
-				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
-	unsigned short data_size = mtu - hdr_size;
-
-	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req = NULL;
@@ -770,7 +679,7 @@ static int send_connect(struct c4iw_ep *ep)
 	u64 opt0;
 	u32 opt2;
 	unsigned int mtu_idx;
-	int wscale;
+	u32 wscale;
 	int win, sizev4, sizev6, wrlen;
 	struct sockaddr_in *la = (struct sockaddr_in *)
 				 &ep->com.local_addr;
@@ -817,10 +726,10 @@ static int send_connect(struct c4iw_ep *ep)
 	}
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 
-	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-		 enable_tcp_timestamps,
-		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
-	wscale = compute_wscale(rcv_win);
+	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		      enable_tcp_timestamps,
+		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+	wscale = cxgb_compute_wscale(rcv_win);
 
 	/*
 	 * Specify the largest window that will fit in opt0. The
@@ -1447,9 +1356,9 @@ static void established_upcall(struct c4iw_ep *ep)
 
 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 {
-	struct cpl_rx_data_ack *req;
 	struct sk_buff *skb;
-	int wrlen = roundup(sizeof *req, 16);
+	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
+	u32 credit_dack;
 
 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
@@ -1466,15 +1375,12 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
 		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
 
-	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
-	INIT_TP_WR(req, ep->hwtid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
-						    ep->hwtid));
-	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
-				       RX_DACK_CHANGE_F |
-				       RX_DACK_MODE_V(dack_mode));
-	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
+	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
+		      RX_DACK_MODE_V(dack_mode);
+
+	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
+			    credit_dack);
+
 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 	return credits;
 }
@@ -1972,7 +1878,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	struct sk_buff *skb;
 	struct fw_ofld_connection_wr *req;
 	unsigned int mtu_idx;
-	int wscale;
+	u32 wscale;
 	struct sockaddr_in *sin;
 	int win;
 
@@ -1997,10 +1903,10 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 		htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
 	req->tcb.tx_max = (__force __be32) jiffies;
 	req->tcb.rcv_adv = htons(1);
-	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-		 enable_tcp_timestamps,
-		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
-	wscale = compute_wscale(rcv_win);
+	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		      enable_tcp_timestamps,
+		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+	wscale = cxgb_compute_wscale(rcv_win);
 
 	/*
 	 * Specify the largest window that will fit in opt0. The
@@ -2054,15 +1960,6 @@ static inline int act_open_has_tid(int status)
 		status != CPL_ERR_CONN_EXIST);
 }
 
-/* Returns whether a CPL status conveys negative advice.
- */
-static int is_neg_adv(unsigned int status)
-{
-	return status == CPL_ERR_RTX_NEG_ADVICE ||
-	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
-	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
 static char *neg_adv_str(unsigned int status)
 {
 	switch (status) {
@@ -2218,16 +2115,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 
 	/* find a route */
 	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
-		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
-				     raddr->sin_addr.s_addr, laddr->sin_port,
-				     raddr->sin_port, ep->com.cm_id->tos);
+		ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
+					  laddr->sin_addr.s_addr,
+					  raddr->sin_addr.s_addr,
+					  laddr->sin_port,
+					  raddr->sin_port, ep->com.cm_id->tos);
 		iptype = 4;
 		ra = (__u8 *)&raddr->sin_addr;
 	} else {
-		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
-				      raddr6->sin6_addr.s6_addr,
-				      laddr6->sin6_port, raddr6->sin6_port, 0,
-				      raddr6->sin6_scope_id);
+		ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
+					   get_real_dev,
+					   laddr6->sin6_addr.s6_addr,
+					   raddr6->sin6_addr.s6_addr,
+					   laddr6->sin6_port,
+					   raddr6->sin6_port, 0,
+					   raddr6->sin6_scope_id);
 		iptype = 6;
 		ra = (__u8 *)&raddr6->sin6_addr;
 	}
@@ -2299,7 +2201,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
 	     status, status2errno(status));
 
-	if (is_neg_adv(status)) {
+	if (cxgb_is_neg_adv(status)) {
 		PDBG("%s Connection problems for atid %u status %u (%s)\n",
 		     __func__, atid, status, neg_adv_str(status));
 		ep->stats.connect_neg_adv++;
@@ -2426,7 +2328,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	unsigned int mtu_idx;
 	u64 opt0;
 	u32 opt2;
-	int wscale;
+	u32 wscale;
 	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
 	int win;
 	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
@@ -2447,10 +2349,10 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
 						    ep->hwtid));
 
-	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-		 enable_tcp_timestamps && req->tcpopt.tstamp,
-		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
-	wscale = compute_wscale(rcv_win);
+	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		      enable_tcp_timestamps && req->tcpopt.tstamp,
+		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+	wscale = cxgb_compute_wscale(rcv_win);
 
 	/*
 	 * Specify the largest window that will fit in opt0. The
@@ -2522,42 +2424,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
 	return;
 }
 
-static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
-		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
-		       __be16 *local_port, __be16 *peer_port)
-{
-	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
-		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
-		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
-		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
-		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
-	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
-	struct tcphdr *tcp = (struct tcphdr *)
-			     ((u8 *)(req + 1) + eth_len + ip_len);
-
-	if (ip->version == 4) {
-		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
-		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
-		     ntohs(tcp->dest));
-		*iptype = 4;
-		memcpy(peer_ip, &ip->saddr, 4);
-		memcpy(local_ip, &ip->daddr, 4);
-	} else {
-		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
-		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
-		     ntohs(tcp->dest));
-		*iptype = 6;
-		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
-		memcpy(local_ip, ip6->daddr.s6_addr, 16);
-	}
-	*peer_port = tcp->source;
-	*local_port = tcp->dest;
-
-	return;
-}
-
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *child_ep = NULL, *parent_ep;
@@ -2586,8 +2452,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
-		   local_ip, peer_ip, &local_port, &peer_port);
+	cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
+			&iptype, local_ip, peer_ip, &local_port, &peer_port);
 
 	/* Find output route */
 	if (iptype == 4) {
@@ -2595,18 +2461,19 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		     , __func__, parent_ep, hwtid,
 		     local_ip, peer_ip, ntohs(local_port),
 		     ntohs(peer_port), peer_mss);
-		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
-				 local_port, peer_port,
-				 tos);
+		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
+				      local_port, peer_port, tos);
 	} else {
 		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
 		     , __func__, parent_ep, hwtid,
 		     local_ip, peer_ip, ntohs(local_port),
 		     ntohs(peer_port), peer_mss);
-		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
-				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
-				  ((struct sockaddr_in6 *)
-				   &parent_ep->com.local_addr)->sin6_scope_id);
+		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+				       local_ip, peer_ip, local_port, peer_port,
+				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+				       ((struct sockaddr_in6 *)
+					&parent_ep->com.local_addr)->sin6_scope_id);
 	}
 	if (!dst) {
 		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
@@ -2839,18 +2706,18 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req_rss *req = cplhdr(skb);
 	struct c4iw_ep *ep;
-	struct cpl_abort_rpl *rpl;
 	struct sk_buff *rpl_skb;
 	struct c4iw_qp_attributes attrs;
 	int ret;
 	int release = 0;
 	unsigned int tid = GET_TID(req);
+	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
 
 	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
 
-	if (is_neg_adv(req->status)) {
+	if (cxgb_is_neg_adv(req->status)) {
 		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
 		     __func__, ep->hwtid, req->status,
 		     neg_adv_str(req->status));
@@ -2943,11 +2810,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		release = 1;
 		goto out;
 	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
-	INIT_TP_WR(rpl, ep->hwtid);
-	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
-	rpl->cmd = CPL_ABORT_NO_RST;
+
+	cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
+
 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
 out:
 	if (release)
@@ -3379,9 +3244,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
 		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
 		     ra, ntohs(raddr->sin_port));
-		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
-				     raddr->sin_addr.s_addr, laddr->sin_port,
-				     raddr->sin_port, cm_id->tos);
+		ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+					  laddr->sin_addr.s_addr,
+					  raddr->sin_addr.s_addr,
+					  laddr->sin_port,
+					  raddr->sin_port, cm_id->tos);
 	} else {
 		iptype = 6;
 		ra = (__u8 *)&raddr6->sin6_addr;
@@ -3400,10 +3267,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		     __func__, laddr6->sin6_addr.s6_addr,
 		     ntohs(laddr6->sin6_port),
 		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
-		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
-				      raddr6->sin6_addr.s6_addr,
-				      laddr6->sin6_port, raddr6->sin6_port, 0,
-				      raddr6->sin6_scope_id);
+		ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+					   laddr6->sin6_addr.s6_addr,
+					   raddr6->sin6_addr.s6_addr,
+					   laddr6->sin6_port,
+					   raddr6->sin6_port, 0,
+					   raddr6->sin6_scope_id);
 	}
 	if (!ep->dst) {
 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
@@ -4045,8 +3914,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
 	     ntohs(tcph->source), iph->tos);
 
-	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
-			 iph->tos);
+	dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+			      iph->daddr, iph->saddr, tcph->dest,
+			      tcph->source, iph->tos);
 	if (!dst) {
 		pr_err("%s - failed to find dst entry!\n",
		       __func__);
@@ -4321,7 +4191,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		kfree_skb(skb);
 		return 0;
 	}
-	if (is_neg_adv(req->status)) {
+	if (cxgb_is_neg_adv(req->status)) {
 		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
 		     __func__, ep->hwtid, req->status,
 		     neg_adv_str(req->status));
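The cm.c hunks above replace the open-coded CPL request construction (skb_put(), memset(), INIT_TP_WR(), OPCODE_TID(), set_wr_txq()) with shared libcxgb helpers such as cxgb_mk_tid_release() and cxgb_mk_abort_req(), with the work-request length computed up front as roundup(sizeof(...), 16). The following is a minimal userspace sketch of that reserve-zero-fill pattern; the buffer and message types are simplified stand-ins for illustration, not the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Round a length up to a multiple of 16, like roundup(sizeof(*req), 16). */
#define ROUNDUP16(len) (((len) + 15u) & ~15u)

struct cpl_msg {			/* stand-in for a cpl_* request */
	uint32_t opcode_tid;
	uint32_t flags;
};

struct buf {
	uint8_t data[256];
	size_t len;
};

/* Model of skb_put(): extend the buffer tail and return the reserved area. */
static void *buf_put(struct buf *b, size_t len)
{
	void *p = b->data + b->len;

	b->len += len;
	return p;
}

int main(void)
{
	struct buf skb = { .len = 0 };
	uint32_t wrlen = ROUNDUP16(sizeof(struct cpl_msg));
	struct cpl_msg *req = buf_put(&skb, wrlen);

	memset(req, 0, wrlen);			/* zero the whole work request */
	req->opcode_tid = (0x2au << 24) | 1234;	/* illustrative opcode | TID */
	printf("reserved %u bytes for a %zu-byte message\n",
	       wrlen, sizeof(struct cpl_msg));
	return 0;
}

Factoring this into one helper per CPL message type is what lets the cxgb4, iw_cxgb4, and cxgbit drivers share a single, audited implementation instead of three copies.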
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1480,6 +1480,10 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
 
 static struct cxgb4_uld_info c4iw_uld_info = {
 	.name = DRV_NAME,
+	.nrxq = MAX_ULD_QSETS,
+	.rxq_size = 511,
+	.ciq = true,
+	.lro = false,
 	.add = c4iw_uld_add,
 	.rx_handler = c4iw_uld_rx_handler,
 	.state_change = c4iw_uld_state_change,
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -882,15 +882,6 @@ static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
 	return cm_id->provider_data;
 }
 
-static inline int compute_wscale(int win)
-{
-	int wscale = 0;
-
-	while (wscale < 14 && (65535<<wscale) < win)
-		wscale++;
-	return wscale;
-}
-
 static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
 {
 #if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
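The hunk above removes the driver-private compute_wscale() in favor of the libcxgb version that the call sites now invoke as cxgb_compute_wscale(). For reference, the computation it performs is small enough to demo standalone: find the smallest TCP window-scale shift (RFC 7323, capped at 14) that lets the 16-bit window field cover the receive window. A runnable sketch:

#include <stdio.h>

static int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	int windows[] = { 65535, 262144, 1048576 };

	for (int i = 0; i < 3; i++)
		printf("win=%d -> wscale=%d\n", windows[i],
		       compute_wscale(windows[i]));
	return 0;
}

For example, a 1 MiB receive window needs wscale = 5, since 65535 << 4 is still smaller than 1048576.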
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -729,14 +729,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
-			  int entries, struct mlx5_create_cq_mbox_in **cqb,
+			  int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
 	struct mlx5_ib_create_cq ucmd;
 	size_t ucmdlen;
 	int page_shift;
+	__be64 *pas;
 	int npages;
 	int ncont;
+	void *cqc;
 	int err;
 
 	ucmdlen =
@@ -774,14 +776,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
 
-	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
+	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
 	*cqb = mlx5_vzalloc(*inlen);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_db;
 	}
-	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
-	(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+
+	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+
+	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+	MLX5_SET(cqc, cqc, log_page_size,
+		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
 	*index = to_mucontext(context)->uuari.uars[0].index;
 
@@ -816,9 +824,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
 
 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 			    int entries, int cqe_size,
-			    struct mlx5_create_cq_mbox_in **cqb,
-			    int *index, int *inlen)
+			    u32 **cqb, int *index, int *inlen)
 {
+	__be64 *pas;
+	void *cqc;
 	int err;
 
 	err = mlx5_db_alloc(dev->mdev, &cq->db);
@@ -835,15 +844,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
 	init_cq_buf(cq, &cq->buf);
 
-	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
+	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
 	*cqb = mlx5_vzalloc(*inlen);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_buf;
 	}
-	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
-	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+	mlx5_fill_page_array(&cq->buf.buf, pas);
+
+	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+	MLX5_SET(cqc, cqc, log_page_size,
+		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
 	*index = dev->mdev->priv.uuari.uars[0].index;
 
 	return 0;
@@ -877,11 +892,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 {
 	int entries = attr->cqe;
 	int vector = attr->comp_vector;
-	struct mlx5_create_cq_mbox_in *cqb = NULL;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_cq *cq;
 	int uninitialized_var(index);
 	int uninitialized_var(inlen);
+	u32 *cqb = NULL;
+	void *cqc;
 	int cqe_size;
 	unsigned int irqn;
 	int eqn;
@@ -927,19 +943,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
-	cq->cqe_size = cqe_size;
-	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-
-	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
-		cqb->ctx.cqe_sz_flags |= (1 << 1);
-
-	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
 	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
 	if (err)
 		goto err_cqb;
 
-	cqb->ctx.c_eqn = cpu_to_be16(eqn);
-	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+	cq->cqe_size = cqe_size;
+
+	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+	MLX5_SET(cqc, cqc, uar_page, index);
+	MLX5_SET(cqc, cqc, c_eqn, eqn);
+	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+		MLX5_SET(cqc, cqc, oi, 1);
 
 	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
@@ -1070,27 +1087,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
 
 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
-	struct mlx5_modify_cq_mbox_in *in;
 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
 	struct mlx5_ib_cq *mcq = to_mcq(cq);
 	int err;
-	u32 fsel;
 
 	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
 		return -ENOSYS;
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	in->cqn = cpu_to_be32(mcq->mcq.cqn);
-	fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
-	in->ctx.cq_period = cpu_to_be16(cq_period);
-	in->ctx.cq_max_count = cpu_to_be16(cq_count);
-	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
-	kfree(in);
-
+	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+					     cq_period, cq_count);
 	if (err)
 		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
 
@@ -1223,9 +1228,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
-	struct mlx5_modify_cq_mbox_in *in;
+	void *cqc;
+	u32 *in;
 	int err;
 	int npas;
+	__be64 *pas;
 	int page_shift;
 	int inlen;
 	int uninitialized_var(cqe_size);
@@ -1267,28 +1274,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	if (err)
 		goto ex;
 
-	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		goto ex_resize;
 	}
 
+	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
 	if (udata)
 		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
-				     in->pas, 0);
+				     pas, 0);
 	else
-		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
-
-	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
-				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
-				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
-	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-	in->ctx.page_offset = 0;
-	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
-	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
-	in->cqn = cpu_to_be32(cq->mcq.cqn);
+		mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+
+	MLX5_SET(modify_cq_in, in,
+		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+		 MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+	MLX5_SET(cqc, cqc, log_page_size,
+		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
 
 	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
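The mlx5 hunks above (and the ones that follow) convert hand-laid-out mailbox structs like mlx5_create_cq_mbox_in into flat u32 command buffers addressed through MLX5_SET()/MLX5_SET64()/MLX5_ADDR_OF(), which pack named fields at offsets derived from the firmware interface description. Below is a self-contained userspace model of that idiom; the field table, names, and offsets here are invented for illustration -- the real macros live in include/linux/mlx5/device.h and compute everything from mlx5_ifc.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl/ntohl for the big-endian wire layout */

struct field { unsigned dword; unsigned shift; uint32_t mask; };

/* Hypothetical field descriptors, standing in for generated offsets. */
static const struct field LOG_CQ_SIZE = { 0, 24, 0x1f };
static const struct field UAR_PAGE    = { 1,  0, 0xffffff };

/* Read-modify-write one field inside a big-endian command buffer. */
static void set_field(uint32_t *buf, struct field f, uint32_t v)
{
	uint32_t dw = ntohl(buf[f.dword]);

	dw &= ~(f.mask << f.shift);
	dw |= (v & f.mask) << f.shift;
	buf[f.dword] = htonl(dw);
}

int main(void)
{
	uint32_t cqc[4] = {0};	/* like u32 in[MLX5_ST_SZ_DW(...)] = {0} */

	set_field(cqc, LOG_CQ_SIZE, 10);	/* 2^10 CQ entries */
	set_field(cqc, UAR_PAGE, 7);
	printf("dw0=0x%08x dw1=0x%08x\n", ntohl(cqc[0]), ntohl(cqc[1]));
	return 0;
}

Because the buffer starts zeroed and every field write goes through one macro, the conversion also removes the scattered memset() calls and the cpu_to_be16/32/64 conversions that each caller previously had to get right by hand.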
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -232,23 +232,19 @@ static int set_roce_addr(struct ib_device *device, u8 port_num,
 			 const union ib_gid *gid,
 			 const struct ib_gid_attr *attr)
 {
-	struct mlx5_ib_dev *dev = to_mdev(device);
-	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)];
-	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
+	struct mlx5_ib_dev *dev = to_mdev(device);
+	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
 	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
 
 	if (ll != IB_LINK_LAYER_ETHERNET)
 		return -EINVAL;
 
-	memset(in, 0, sizeof(in));
-
 	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
 
 	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
 	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
 
-	memset(out, 0, sizeof(out));
 	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -753,8 +749,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 			       &props->active_width);
 	if (err)
 		goto out;
-	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
-					 port);
+	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
 	if (err)
 		goto out;
 
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -505,7 +505,7 @@ struct mlx5_ib_mr {
 	int			umred;
 	int			npages;
 	struct mlx5_ib_dev     *dev;
-	struct mlx5_create_mkey_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
 	struct mlx5_core_sig_ctx    *sig;
 	int			live;
 	void			*descs_alloc;
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -135,20 +135,10 @@ static void reg_mr_callback(int status, void *context)
 		return;
 	}
 
-	if (mr->out.hdr.status) {
-		mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
-			     mr->out.hdr.status,
-			     be32_to_cpu(mr->out.hdr.syndrome));
-		kfree(mr);
-		dev->fill_delay = 1;
-		mod_timer(&dev->delay_timer, jiffies + HZ);
-		return;
-	}
-
 	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
 	key = dev->mdev->priv.mkey_key++;
 	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
-	mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
 
 	cache->last_add = jiffies;
 
@@ -170,16 +160,19 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
-	struct mlx5_create_mkey_mbox_in *in;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
+	void *mkc;
+	u32 *in;
 	int err = 0;
 	int i;
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	for (i = 0; i < num; i++) {
 		if (ent->pending >= MAX_PENDING_REG_MR) {
 			err = -EAGAIN;
@@ -194,18 +187,22 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		mr->order = ent->order;
 		mr->umred = 1;
 		mr->dev = dev;
-		in->seg.status = MLX5_MKEY_STATUS_FREE;
-		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
-		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
-		in->seg.log2_page_size = 12;
+
+		MLX5_SET(mkc, mkc, free, 1);
+		MLX5_SET(mkc, mkc, umr_en, 1);
+		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+		MLX5_SET(mkc, mkc, qpn, 0xffffff);
+		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
+		MLX5_SET(mkc, mkc, log_page_size, 12);
 
 		spin_lock_irq(&ent->lock);
 		ent->pending++;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
-					    sizeof(*in), reg_mr_callback,
-					    mr, &mr->out);
+		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
+					       in, inlen,
+					       mr->out, sizeof(mr->out),
+					       reg_mr_callback, mr);
 		if (err) {
 			spin_lock_irq(&ent->lock);
 			ent->pending--;
@@ -670,30 +667,38 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_mkey_seg *seg;
 	struct mlx5_ib_mr *mr;
+	void *mkc;
+	u32 *in;
 	int err;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_free;
 	}
 
-	seg = &in->seg;
-	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
-	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	seg->start_addr = 0;
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
-	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
-				    NULL);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	MLX5_SET(mkc, mkc, length64, 1);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET64(mkc, mkc, start_addr, 0);
+
+	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
 	if (err)
 		goto err_in;
 
@@ -1063,9 +1068,11 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 				     int page_shift, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
+	__be64 *pas;
+	void *mkc;
 	int inlen;
+	u32 *in;
 	int err;
 	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
 
@@ -1073,31 +1080,41 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+		sizeof(*pas) * ((npages + 1) / 2) * 2;
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
 	}
-	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
 			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
 
-	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+	/* The pg_access bit allows setting the access flags
 	 * in the page list submitted with the command. */
-	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
-	in->seg.flags = convert_access(access_flags) |
-		MLX5_ACCESS_MODE_MTT;
-	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
-	in->seg.start_addr = cpu_to_be64(virt_addr);
-	in->seg.len = cpu_to_be64(length);
-	in->seg.bsfs_octo_size = 0;
-	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
-	in->seg.log2_page_size = page_shift;
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
-							 1 << page_shift));
-	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
-				    NULL, NULL);
+	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
+	MLX5_SET64(mkc, mkc, len, length);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+	MLX5_SET(mkc, mkc, translations_octword_size,
+		 get_octo_len(virt_addr, length, 1 << page_shift));
+	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+		 get_octo_len(virt_addr, length, 1 << page_shift));
+
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
 		goto err_2;
@@ -1523,30 +1540,32 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 			       u32 max_num_sg)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_ib_mr *mr;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	int ndescs = ALIGN(max_num_sg, 4);
+	struct mlx5_ib_mr *mr;
+	void *mkc;
+	u32 *in;
 	int err;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_free;
 	}
 
-	in->seg.status = MLX5_MKEY_STATUS_FREE;
-	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, free, 1);
+	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 
 	if (mr_type == IB_MR_TYPE_MEM_REG) {
-		mr->access_mode = MLX5_ACCESS_MODE_MTT;
-		in->seg.log2_page_size = PAGE_SHIFT;
-
+		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
 		err = mlx5_alloc_priv_descs(pd->device, mr,
 					    ndescs, sizeof(u64));
 		if (err)
@@ -1555,7 +1574,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		mr->desc_size = sizeof(u64);
 		mr->max_descs = ndescs;
 	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
-		mr->access_mode = MLX5_ACCESS_MODE_KLM;
+		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
 
 		err = mlx5_alloc_priv_descs(pd->device, mr,
 					    ndescs, sizeof(struct mlx5_klm));
@@ -1566,9 +1585,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
 		u32 psv_index[2];
 
-		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
-					       MLX5_MKEY_BSF_EN);
-		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+		MLX5_SET(mkc, mkc, bsf_en, 1);
+		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
 		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
 		if (!mr->sig) {
 			err = -ENOMEM;
@@ -1581,7 +1599,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		if (err)
 			goto err_free_sig;
 
-		mr->access_mode = MLX5_ACCESS_MODE_KLM;
+		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
 		mr->sig->psv_memory.psv_idx = psv_index[0];
 		mr->sig->psv_wire.psv_idx = psv_index[1];
 
@@ -1595,9 +1613,10 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		goto err_free_in;
 	}
 
-	in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
-	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
-				    NULL, NULL, NULL);
+	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
+	MLX5_SET(mkc, mkc, umr_en, 1);
+
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err)
 		goto err_destroy_psv;
 
@@ -1633,8 +1652,10 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			       struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_create_mkey_mbox_in *in = NULL;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mw *mw = NULL;
+	u32 *in = NULL;
+	void *mkc;
 	int ndescs;
 	int err;
 	struct mlx5_ib_alloc_mw req = {};
@@ -1658,23 +1679,24 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
 
 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!mw || !in) {
 		err = -ENOMEM;
 		goto free;
 	}
 
-	in->seg.status = MLX5_MKEY_STATUS_FREE;
-	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
-	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
-	in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
-		MLX5_PERM_LOCAL_READ;
-	if (type == IB_MW_TYPE_2)
-		in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
-	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
-				    NULL, NULL, NULL);
+	MLX5_SET(mkc, mkc, free, 1);
+	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, umr_en, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
+	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
 	if (err)
 		goto free;
 
@@ -1811,7 +1833,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 				   mr->desc_size * mr->max_descs,
 				   DMA_TO_DEVICE);
 
-	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
 		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
 	else
 		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
@@ -726,7 +726,7 @@ err_umem:
|
||||
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
struct mlx5_ib_qp *qp, struct ib_udata *udata,
|
||||
struct ib_qp_init_attr *attr,
|
||||
struct mlx5_create_qp_mbox_in **in,
|
||||
u32 **in,
|
||||
struct mlx5_ib_create_qp_resp *resp, int *inlen,
|
||||
struct mlx5_ib_qp_base *base)
|
||||
{
|
||||
@@ -739,6 +739,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
u32 offset = 0;
|
||||
int uuarn;
|
||||
int ncont = 0;
|
||||
__be64 *pas;
|
||||
void *qpc;
|
||||
int err;
|
||||
|
||||
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
|
||||
@@ -795,20 +797,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
ubuffer->umem = NULL;
|
||||
}
|
||||
|
||||
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
|
||||
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
|
||||
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
|
||||
*in = mlx5_vzalloc(*inlen);
|
||||
if (!*in) {
|
||||
err = -ENOMEM;
|
||||
goto err_umem;
|
||||
}
|
||||
if (ubuffer->umem)
|
||||
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift,
|
||||
(*in)->pas, 0);
|
||||
(*in)->ctx.log_pg_sz_remote_qpn =
|
||||
cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
|
||||
(*in)->ctx.params2 = cpu_to_be32(offset << 6);
|
||||
|
||||
(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
|
||||
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
|
||||
if (ubuffer->umem)
|
||||
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
|
||||
|
||||
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
|
||||
|
||||
MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
|
||||
MLX5_SET(qpc, qpc, page_offset, offset);
|
||||
|
||||
MLX5_SET(qpc, qpc, uar_page, uar_index);
|
||||
resp->uuar_index = uuarn;
|
||||
qp->uuarn = uuarn;
|
||||
|
||||
@@ -857,12 +863,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
|
||||
static int create_kernel_qp(struct mlx5_ib_dev *dev,
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct mlx5_ib_qp *qp,
|
||||
struct mlx5_create_qp_mbox_in **in, int *inlen,
|
||||
u32 **in, int *inlen,
|
||||
struct mlx5_ib_qp_base *base)
|
||||
{
|
||||
enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
|
||||
struct mlx5_uuar_info *uuari;
|
||||
int uar_index;
|
||||
void *qpc;
|
||||
int uuarn;
|
||||
int err;
|
||||
|
||||
@@ -902,25 +909,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
|
||||
}
|
||||
|
||||
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
|
||||
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
|
||||
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
|
||||
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
|
||||
*in = mlx5_vzalloc(*inlen);
|
||||
if (!*in) {
|
||||
err = -ENOMEM;
|
||||
goto err_buf;
|
||||
}
|
||||
(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
|
||||
(*in)->ctx.log_pg_sz_remote_qpn =
|
||||
cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
|
||||
|
||||
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
|
||||
MLX5_SET(qpc, qpc, uar_page, uar_index);
|
||||
MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
|
||||
|
||||
/* Set "fast registration enabled" for all kernel QPs */
|
||||
(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
|
||||
(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
|
||||
MLX5_SET(qpc, qpc, fre, 1);
|
||||
MLX5_SET(qpc, qpc, rlky, 1);
|
||||
|
||||
if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
|
||||
(*in)->ctx.deth_sqpn = cpu_to_be32(1);
|
||||
MLX5_SET(qpc, qpc, deth_sqpn, 1);
|
||||
qp->flags |= MLX5_IB_QP_SQPN_QP1;
|
||||
}
|
||||
|
||||
mlx5_fill_page_array(&qp->buf, (*in)->pas);
|
||||
mlx5_fill_page_array(&qp->buf,
|
||||
(__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));
|
||||
|
||||
err = mlx5_db_alloc(dev->mdev, &qp->db);
|
||||
if (err) {
|
||||
@@ -974,15 +985,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
|
||||
free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
|
||||
}
|
||||
|
||||
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
|
||||
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
|
||||
{
|
||||
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
|
||||
(attr->qp_type == IB_QPT_XRC_INI))
|
||||
return cpu_to_be32(MLX5_SRQ_RQ);
|
||||
return MLX5_SRQ_RQ;
|
||||
else if (!qp->has_rq)
|
||||
return cpu_to_be32(MLX5_ZERO_LEN_RQ);
|
||||
return MLX5_ZERO_LEN_RQ;
|
||||
else
|
||||
return cpu_to_be32(MLX5_NON_ZERO_RQ);
|
||||
return MLX5_NON_ZERO_RQ;
|
||||
}
|
||||
|
||||
static int is_connected(enum ib_qp_type qp_type)
|
||||
@@ -996,13 +1007,10 @@ static int is_connected(enum ib_qp_type qp_type)
|
||||
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
|
||||
struct mlx5_ib_sq *sq, u32 tdn)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(create_tis_in)];
|
||||
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
|
||||
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(tisc, tisc, transport_domain, tdn);
|
||||
|
||||
return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
|
||||
}
|
||||
|
||||
@@ -1191,7 +1199,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
|
||||
}
|
||||
|
||||
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
|
||||
struct mlx5_create_qp_mbox_in *in,
|
||||
u32 *in,
|
||||
struct ib_pd *pd)
|
||||
{
|
||||
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
|
||||
@@ -1462,18 +1470,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
struct ib_udata *udata, struct mlx5_ib_qp *qp)
|
||||
{
|
||||
struct mlx5_ib_resources *devr = &dev->devr;
|
||||
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
|
||||
struct mlx5_core_dev *mdev = dev->mdev;
|
||||
struct mlx5_ib_qp_base *base;
|
||||
struct mlx5_ib_create_qp_resp resp;
|
||||
struct mlx5_create_qp_mbox_in *in;
|
||||
struct mlx5_ib_create_qp ucmd;
|
||||
struct mlx5_ib_cq *send_cq;
|
||||
struct mlx5_ib_cq *recv_cq;
|
||||
unsigned long flags;
|
||||
int inlen = sizeof(*in);
|
||||
int err;
|
||||
u32 uidx = MLX5_IB_DEFAULT_UIDX;
|
||||
struct mlx5_ib_create_qp ucmd;
|
||||
struct mlx5_ib_qp_base *base;
|
||||
void *qpc;
|
||||
u32 *in;
|
||||
int err;
|
||||
|
||||
base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
|
||||
&qp->raw_packet_qp.rq.base :
|
||||
@@ -1601,7 +1609,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
in = mlx5_vzalloc(sizeof(*in));
|
||||
in = mlx5_vzalloc(inlen);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -1611,26 +1619,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
if (is_sqp(init_attr->qp_type))
|
||||
qp->port = init_attr->port_num;
|
||||
|
||||
in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
|
||||
MLX5_QP_PM_MIGRATED << 11);
|
||||
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
|
||||
|
||||
MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
|
||||
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
|
||||
|
||||
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
|
||||
in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
|
||||
MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
|
||||
else
|
||||
in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
|
||||
MLX5_SET(qpc, qpc, latency_sensitive, 1);
|
||||
|
||||
|
||||
if (qp->wq_sig)
|
||||
in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
|
||||
MLX5_SET(qpc, qpc, wq_signature, 1);
|
||||
|
||||
if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
|
||||
in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
|
||||
MLX5_SET(qpc, qpc, block_lb_mc, 1);
|
||||
|
||||
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
|
||||
in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
|
||||
MLX5_SET(qpc, qpc, cd_master, 1);
|
||||
if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
|
||||
in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
|
||||
MLX5_SET(qpc, qpc, cd_slave_send, 1);
|
||||
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
|
||||
in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
|
||||
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
|
||||
|
||||
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
|
||||
int rcqe_sz;
|
||||
@@ -1640,71 +1651,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

 		if (rcqe_sz == 128)
-			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
+			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
 		else
-			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
+			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);

 		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
 			if (scqe_sz == 128)
-				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
+				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
 			else
-				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
+				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
 		}
 	}

 	if (qp->rq.wqe_cnt) {
-		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
-		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
+		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
 	}

-	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
+	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

 	if (qp->sq.wqe_cnt)
-		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
+		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
 	else
-		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
+		MLX5_SET(qpc, qpc, no_sq, 1);

 	/* Set default resources */
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
-		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
-		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
-		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
-		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
 		break;
 	case IB_QPT_XRC_INI:
-		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
-		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
-		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
 		break;
 	default:
 		if (init_attr->srq) {
-			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
-			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
+			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
 		} else {
-			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
-			in->ctx.rq_type_srqn |=
-				cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
+			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
 		}
 	}

 	if (init_attr->send_cq)
-		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
+		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

 	if (init_attr->recv_cq)
-		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

-	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
+	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

-	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
-		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
-		/* 0xffffff means we ask to work with cqe version 0 */
+	/* 0xffffff means we ask to work with cqe version 0 */
+	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
 		MLX5_SET(qpc, qpc, user_index, uidx);
-	}
+
 	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
 	if (init_attr->qp_type == IB_QPT_UD &&
 	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
-		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
 		qp->flags |= MLX5_IB_QP_LSO;
 	}
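The recurring change in create_qp_common() above is mechanical: every hand-packed big-endian assignment on the old mailbox (in->ctx.field = cpu_to_be32(...)) becomes a named accessor on the raw command buffer (MLX5_SET(qpc, qpc, field, ...)), with qpc located once via MLX5_ADDR_OF(create_qp_in, in, qpc). A minimal standalone sketch of the idea behind those accessors follows; the dword/offset/width values are invented for illustration, the real layouts are generated from include/linux/mlx5/mlx5_ifc.h:

/* toy_mlx5_set.c - toy model of MLX5_SET: a command buffer is an array
 * of big-endian 32-bit words, and each field is addressed by a
 * (dword, bit offset, width) triple, so call sites name fields instead
 * of shifting and masking by hand.  Not the real QPC layout. */
#include <arpa/inet.h>		/* htonl/ntohl stand in for cpu_to_be32 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void toy_set(uint32_t *buf, int dw, int off, int width, uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << off;
	uint32_t dword = ntohl(buf[dw]);	/* device data is big endian */

	dword = (dword & ~mask) | ((val << off) & mask);
	buf[dw] = htonl(dword);
}

int main(void)
{
	uint32_t qpc[4];

	memset(qpc, 0, sizeof(qpc));
	/* analogous to MLX5_SET(qpc, qpc, pm_state, ...): write a 2-bit
	 * field at an (invented) bit offset 11 of dword 0 */
	toy_set(qpc, 0, 11, 2, 3);
	printf("dword0 = 0x%08x\n", ntohl(qpc[0]));	/* prints 0x00001800 */
	return 0;
}

The payoff visible in the hunks: the endianness and shift bookkeeping lives in one place, so a call site like MLX5_SET(qpc, qpc, block_lb_mc, 1) documents exactly which field it touches.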
@@ -1861,7 +1869,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
-	struct mlx5_modify_qp_mbox_in *in;
 	unsigned long flags;
 	int err;

@@ -1874,16 +1881,12 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 	       &qp->raw_packet_qp.rq.base :
 	       &qp->trans_qp.base;

-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return;
-
 	if (qp->state != IB_QPS_RESET) {
 		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
 			mlx5_ib_qp_disable_pagefaults(qp);
 			err = mlx5_core_qp_modify(dev->mdev,
-						  MLX5_CMD_OP_2RST_QP, in, 0,
-						  &base->mqp);
+						  MLX5_CMD_OP_2RST_QP, 0,
+						  NULL, &base->mqp);
 		} else {
 			err = modify_raw_packet_qp(dev, qp,
 						   MLX5_CMD_OP_2RST_QP);
@@ -1925,8 +1928,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 				     base->mqp.qpn);
 	}

-	kfree(in);
-
 	if (qp->create_type == MLX5_QP_KERNEL)
 		destroy_qp_kernel(dev, qp);
 	else if (qp->create_type == MLX5_QP_USER)
@@ -2512,7 +2513,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
-	struct mlx5_modify_qp_mbox_in *in;
 	struct mlx5_ib_pd *pd;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
 	enum mlx5_qp_optpar optpar;
@@ -2521,11 +2521,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	int err;
 	u16 op;

-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
 		return -ENOMEM;

-	context = &in->ctx;
 	err = to_mlx5_st(ibqp->qp_type);
 	if (err < 0) {
 		mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
@@ -2690,12 +2689,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
-	in->optparam = cpu_to_be32(optpar);

 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
 		err = modify_raw_packet_qp(dev, qp, op);
 	else
-		err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event,
+		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
 					  &base->mqp);
 	if (err)
 		goto out;
@@ -2736,7 +2734,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	}

 out:
-	kfree(in);
+	kfree(context);
 	return err;
 }

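The destroy and modify paths share one theme: the caller-allocated struct mlx5_modify_qp_mbox_in is gone, and mlx5_core_qp_modify() now takes the opcode, the optional-parameter mask, and a bare QP context, building the command mailbox internally. A compile-only sketch of the calling convention as it reads from these call sites — the prototype is reconstructed from the diff with stub types, not quoted from a header; see include/linux/mlx5/qp.h in the tree for the authoritative declaration:

/* Stub forward declarations; the real types come from the mlx5 headers. */
#include <stddef.h>
#include <stdint.h>

struct mlx5_core_dev;
struct mlx5_core_qp;

/* Reconstructed from the call sites in this patch: */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, uint16_t opcode,
			uint32_t opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp);

/* A transition to RESET carries no context, hence the 0/NULL pair that
 * destroy_qp_common() now passes (opcode value left as a parameter): */
static inline int toy_reset_qp(struct mlx5_core_dev *mdev, uint16_t op_2rst,
			       struct mlx5_core_qp *mqp)
{
	return mlx5_core_qp_modify(mdev, op_2rst, 0, NULL, mqp);
}

This is why both destroy_qp_common() and __mlx5_ib_modify_qp() lose their kzalloc()/kfree() pairs above, and the latter allocates only the mlx5_qp_context it actually fills.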
@@ -2969,7 +2967,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,

 	memset(umr, 0, sizeof(*umr));

-	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
 		/* KLMs take twice the size of MTTs */
 		ndescs *= 2;

@@ -3112,9 +3110,9 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,

 	memset(seg, 0, sizeof(*seg));

-	if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
 		seg->log2_page_size = ilog2(mr->ibmr.page_size);
-	else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
 		/* KLMs take twice the size of MTTs */
 		ndescs *= 2;

@@ -3455,7 +3453,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
 	memset(seg, 0, sizeof(*seg));

 	seg->flags = get_umr_flags(wr->access_flags) |
-		     MLX5_ACCESS_MODE_KLM;
+		     MLX5_MKC_ACCESS_MODE_KLMS;
 	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
 				    MLX5_MKEY_BSF_EN | pdn);
@@ -4317,21 +4315,24 @@ static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			 struct ib_qp_attr *qp_attr)
 {
-	struct mlx5_query_qp_mbox_out *outb;
+	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
 	struct mlx5_qp_context *context;
 	int mlx5_state;
+	u32 *outb;
 	int err = 0;

-	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+	outb = kzalloc(outlen, GFP_KERNEL);
 	if (!outb)
 		return -ENOMEM;

-	context = &outb->ctx;
 	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
-				 sizeof(*outb));
+				 outlen);
 	if (err)
 		goto out;

+	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
 	mlx5_state = be32_to_cpu(context->flags) >> 28;

 	qp->state = to_ib_qp_state(mlx5_state);
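query_qp_attr() applies the same recipe on the output side: size the buffer with MLX5_ST_SZ_BYTES(query_qp_out) and locate the context with MLX5_ADDR_OF() instead of dereferencing a fixed mailbox struct (the FIXME notes the struct cast is a stopgap until the fields are read with MLX5_GET). A standalone sketch of what those two size/offset helpers do, with an invented layout struct standing in for the generated mlx5_ifc one:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for an mlx5_ifc layout: sizes and offsets are
 * derived from a packed description struct, never hard-coded. */
struct toy_query_qp_out {
	unsigned char status[0x10 / 8];
	unsigned char reserved[0x70 / 8];
	unsigned char qpc[0x100 / 8];
};

#define TOY_ST_SZ_BYTES(typ)	sizeof(struct toy_##typ)
#define TOY_ADDR_OF(typ, p, fld) \
	((void *)((unsigned char *)(p) + offsetof(struct toy_##typ, fld)))

int main(void)
{
	int outlen = TOY_ST_SZ_BYTES(query_qp_out);
	void *outb = calloc(1, outlen);		/* kzalloc(outlen, ...) above */
	void *qpc;

	if (!outb)
		return 1;
	qpc = TOY_ADDR_OF(query_qp_out, outb, qpc);
	printf("outlen=%d, qpc at byte offset %td\n",
	       outlen, (unsigned char *)qpc - (unsigned char *)outb);
	free(outb);
	return 0;
}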