Merge commit 'v2.6.26' into bkl-removal
@@ -455,8 +455,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 			    IB_DEVICE_CURR_QP_STATE_MOD |
 			    IB_DEVICE_SYS_IMAGE_GUID |
 			    IB_DEVICE_ZERO_STAG |
-			    IB_DEVICE_MEM_WINDOW |
-			    IB_DEVICE_SEND_W_INV);
+			    IB_DEVICE_MEM_WINDOW);
 
 	/* Allocate the qptr_array */
 	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
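Note: the v2.6.26 merge window reworked "send with invalidate" support in the IB core and changed what IB_DEVICE_SEND_W_INV promises, so amso1100 stops advertising the flag here rather than claim semantics it does not implement; the other capability bits are untouched.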
@@ -1096,7 +1096,9 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
+	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.fw_version);
 }
 
@@ -1109,7 +1111,9 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
+	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.driver);
 }
 
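The two hunks above wrap the ethtool get_drvinfo() callback in the RTNL. ethtool_ops entry points are normally reached through the ethtool ioctl path with rtnl_lock() already held, so a sysfs show function that calls one directly must take the lock itself. A minimal sketch of the pattern (query_drvinfo and its placement are illustrative, not from the driver):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Illustrative helper: invoke an ethtool_ops hook from outside the
 * ioctl path, taking the RTNL the way the ioctl path would. */
static void query_drvinfo(struct net_device *ndev,
			  struct ethtool_drvinfo *info)
{
	rtnl_lock();		/* ethtool_ops expect the RTNL held */
	ndev->ethtool_ops->get_drvinfo(ndev, info);
	rtnl_unlock();
}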
@@ -229,7 +229,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
 	int err = 0;
-	u8 t3_wr_flit_cnt;
+	u8 uninitialized_var(t3_wr_flit_cnt);
 	enum t3_wr_opcode t3_wr_opcode = 0;
 	enum t3_wr_flags t3_wr_flags;
 	struct iwch_qp *qhp;
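uninitialized_var(), used here and again in the mlx4_ib_post_send hunk further down, exists purely to silence gcc's false-positive "may be used uninitialized" warnings; it expands to a self-assignment and generates no code. From include/linux/compiler-gcc.h of this era:

/*
 * A trick to suppress uninitialized variable warning without generating any
 * code
 */
#define uninitialized_var(x) x = x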
@@ -421,8 +421,10 @@ int ehca_post_send(struct ib_qp *qp,
 	int ret = 0;
 	unsigned long flags;
 
-	if (unlikely(my_qp->state != IB_QPS_RTS)) {
-		ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
+	/* Reject WR if QP is in RESET, INIT or RTR state */
+	if (unlikely(my_qp->state < IB_QPS_RTS)) {
+		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
+			 my_qp->state, qp->qp_num);
 		return -EINVAL;
 	}
 
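The relaxed `<` test works because enum ib_qp_state declares the states in protocol order, so everything below IB_QPS_RTS is exactly RESET, INIT and RTR — the states named in the new comment. From include/rdma/ib_verbs.h:

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};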
@@ -232,6 +232,11 @@ struct ipath_sdma_desc {
 #define IPATH_SDMA_TXREQ_S_ABORTED 2
 #define IPATH_SDMA_TXREQ_S_SHUTDOWN 3
 
+#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG	(1ull << 63)
+#define IPATH_SDMA_STATUS_ABORT_IN_PROG			(1ull << 62)
+#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE		(1ull << 61)
+#define IPATH_SDMA_STATUS_SCB_EMPTY			(1ull << 30)
+
 /* max dwords in small buffer packet */
 #define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
 
@@ -1492,6 +1492,10 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 		goto bail;
 	}
 
+	case IB_MGMT_METHOD_TRAP:
+	case IB_MGMT_METHOD_REPORT:
+	case IB_MGMT_METHOD_REPORT_RESP:
+	case IB_MGMT_METHOD_TRAP_REPRESS:
 	case IB_MGMT_METHOD_GET_RESP:
 		/*
 		 * The ib_mad module will call us to process responses
@@ -263,14 +263,10 @@ static void sdma_abort_task(unsigned long opaque)
 	hwstatus = ipath_read_kreg64(dd,
 			dd->ipath_kregs->kr_senddmastatus);
 
-	if (/* ScoreBoardDrainInProg */
-	    test_bit(63, &hwstatus) ||
-	    /* AbortInProg */
-	    test_bit(62, &hwstatus) ||
-	    /* InternalSDmaEnable */
-	    test_bit(61, &hwstatus) ||
-	    /* ScbEmpty */
-	    !test_bit(30, &hwstatus)) {
+	if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
+			 IPATH_SDMA_STATUS_ABORT_IN_PROG |
+			 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
+	    !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
		if (dd->ipath_sdma_reset_wait > 0) {
			/* not done shutting down sdma */
			--dd->ipath_sdma_reset_wait;
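This rewrite is a portability fix, not a cosmetic one: test_bit() operates on an unsigned long bitmap, so passing &hwstatus (a u64) aliases it as the wrong type, and on 32-bit kernels bit numbers 61–63 land in a second word whose placement inside the u64 depends on endianness. Masking the 64-bit value with 1ull constants, as the new IPATH_SDMA_STATUS_* defines do, is correct on any word size. The two ipath_dbg() hunks that follow switch from "0x%016llx" to "0x%08lx" for the same reason seen from the other side: dd->ipath_sdma_status is the driver's own unsigned long state bitmap, not the 64-bit hardware register. A minimal sketch of the portable check:

#include <linux/types.h>

/* Portable check of a bit in a 64-bit register value: mask the u64
 * directly instead of aliasing it as an unsigned long bitmap, where
 * bit 63 would land in an endian-dependent word on 32-bit kernels. */
static inline int sdma_drain_in_progress(u64 hwstatus)
{
	return !!(hwstatus & (1ull << 63));	/* ScoreBoardDrainInProg */
}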
@@ -345,7 +341,7 @@ resched:
	 * state change
	 */
	if (jiffies > dd->ipath_sdma_abort_jiffies) {
-		ipath_dbg("looping with status 0x%016llx\n",
+		ipath_dbg("looping with status 0x%08lx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
@@ -615,7 +611,7 @@ void ipath_restart_sdma(struct ipath_devdata *dd)
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!needed) {
-		ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
+		ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
			dd->ipath_sdma_status);
		goto bail;
	}
@@ -407,12 +407,11 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
			dev->n_pkt_drops++;
			goto done;
		}
-		/* XXX Need to free SGEs */
+		wc.opcode = IB_WC_RECV;
	last_imm:
		ipath_copy_sge(&qp->r_sge, data, tlen);
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
-		wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
@@ -514,6 +513,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
			goto done;
		}
		wc.byte_len = qp->r_len;
+		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
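Taken together, these two hunks fix the completion opcode reported for a UC RDMA WRITE with immediate: the shared last_imm path used to hardcode wc.opcode = IB_WC_RECV, so consumers saw a plain receive even when the data arrived via RDMA write. Each path now sets its own opcode (IB_WC_RECV or IB_WC_RECV_RDMA_WITH_IMM) before jumping to the common completion code.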
@@ -1494,7 +1494,8 @@ static int ipath_query_device(struct ib_device *ibdev,

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
-		IB_DEVICE_SYS_IMAGE_GUID;
+		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id = dev->dd->ipath_vendorid;
	props->vendor_part_id = dev->dd->ipath_deviceid;
@@ -333,6 +333,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

+	if (s > dev->dev->caps.max_sq_desc_sz)
+		return -EINVAL;
+
	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift. This
@@ -372,9 +375,6 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
-		if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
-			return -EINVAL;
-
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
@@ -395,7 +395,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		++qp->sq.wqe_shift;
	}

-	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
+			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
		send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

@@ -411,7 +412,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,

	cap->max_send_wr = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
-	cap->max_send_sge = qp->sq.max_gs;
+	cap->max_send_sge = min(qp->sq.max_gs,
+				min(dev->dev->caps.max_sq_sg,
+				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

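The arithmetic in these hunks sizes each work request as a whole number of 1 << wqe_shift units: s is the worst-case bytes per WR, and DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift) is the kernel's integer ceiling division, defined in include/linux/kernel.h as:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

Moving the max_sq_desc_sz check ahead of the loop, and clamping max_gs and max_send_sge against the device caps, keeps the shrunk-WQE accounting from advertising more scatter/gather room than a single descriptor can actually hold.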
@@ -1457,7 +1460,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
-	unsigned seglen;
+	unsigned uninitialized_var(seglen);
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -45,6 +45,7 @@
 #include "mthca_cmd.h"
 #include "mthca_profile.h"
 #include "mthca_memfree.h"
+#include "mthca_wqe.h"

 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
@@ -200,7 +201,18 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
	mdev->limits.gid_table_len      = dev_lim->max_gids;
	mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
-	mdev->limits.max_sg             = dev_lim->max_sg;
+	/*
+	 * Need to allow for worst case send WQE overhead and check
+	 * whether max_desc_sz imposes a lower limit than max_sg; UD
+	 * send has the biggest overhead.
+	 */
+	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
+					      (dev_lim->max_desc_sz -
+					       sizeof (struct mthca_next_seg) -
+					       (mthca_is_memfree(mdev) ?
+						sizeof (struct mthca_arbel_ud_seg) :
+						sizeof (struct mthca_tavor_ud_seg))) /
+						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
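min_t() is used here rather than min() because dev_lim->max_sg and the computed byte quotient have different types, and min() enforces matching types at compile time. Its definition in include/linux/kernel.h of this era forces both operands to the named type before comparing:

#define min_t(type, x, y) ({		\
	type __x = (x);			\
	type __y = (y);			\
	__x < __y ? __x : __y; })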
@@ -109,7 +109,11 @@ static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_m
 {
	struct page *page;

-	page = alloc_pages(gfp_mask, order);
+	/*
+	 * Use __GFP_ZERO because buggy firmware assumes ICM pages are
+	 * cleared, and subtle failures are seen if they aren't.
+	 */
+	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

@@ -2456,10 +2456,8 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			if ((page_count!=0)&&(page_count<<12)-(region->offset&(4096-1))>=region->length)
				goto enough_pages;
			if ((page_count&0x01FF) == 0) {
-				if (page_count>(1024*512)) {
+				if (page_count >= 1024 * 512) {
					ib_umem_release(region);
-					pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
-							vpbl.pbl_pbase);
					nes_free_resource(nesadapter,
							nesadapter->allocated_mrs, stag_index);
					kfree(nesmr);