Merge branches 'cxgb4', 'misc', 'mlx4', 'nes' and 'uapi' into for-next
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -311,6 +311,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
-                cq->ibcq.event_handler(&ib_event,
-                                       cq->ibcq.cq_context);
+                if (cq->ibcq.event_handler)
+                        cq->ibcq.event_handler(&ib_event,
+                                               cq->ibcq.cq_context);
                 break;
         }
 
         default:
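The hunk above adds the standard guard for optional consumer callbacks: the CQ event handler may legitimately be NULL, so the provider must test it before every asynchronous upcall. A minimal userspace sketch of the same pattern (struct and names are illustrative, not the driver's):

/* Sketch of a null-checked event upcall; the handler pointer and
 * cq_context are registered by the consumer, as with ib_create_cq().
 */
#include <stdio.h>

struct cq {
        void (*event_handler)(int event, void *ctx);    /* may be NULL */
        void *cq_context;
};

static void raise_cq_error(struct cq *cq)
{
        if (cq->event_handler)                  /* the added guard */
                cq->event_handler(1 /* CQ_ERR */, cq->cq_context);
}

int main(void)
{
        struct cq quiet = { 0 };        /* no handler registered */

        raise_cq_error(&quiet);         /* safe: guard skips the call */
        return 0;
}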
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -128,9 +128,8 @@ static void stop_ep_timer(struct iwch_ep *ep)
 {
         PDBG("%s ep %p\n", __func__, ep);
         if (!timer_pending(&ep->timer)) {
-                printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
+                WARN(1, "%s timer stopped when its not running! ep %p state %u\n",
                         __func__, ep, ep->com.state);
-                WARN_ON(1);
                 return;
         }
         del_timer_sync(&ep->timer);
@@ -1756,9 +1755,8 @@ static void ep_timeout(unsigned long arg)
                 __state_set(&ep->com, ABORTING);
                 break;
         default:
-                printk(KERN_ERR "%s unexpected state ep %p state %u\n",
+                WARN(1, "%s unexpected state ep %p state %u\n",
                         __func__, ep, ep->com.state);
-                WARN_ON(1);
                 abort = 0;
         }
         spin_unlock_irqrestore(&ep->com.lock, flags);
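Both cxgb3 hunks fold a printk(KERN_ERR ...) plus WARN_ON(1) pair into a single WARN(1, ...), which emits the message and the backtrace in one step. A userspace sketch of the macro's shape in GNU C (the real kernel WARN() also dumps a stack trace and taints the kernel; this stand-in only models the fold):

/* Stand-in WARN() showing how the message and the warn condition fold
 * into one statement; fprintf replaces printk+dump_stack here.
 */
#include <stdio.h>

#define WARN(condition, fmt, ...) ({                                    \
        int __ret_warn_on = !!(condition);                              \
        if (__ret_warn_on)                                              \
                fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);        \
        __ret_warn_on;                                                  \
})

int main(void)
{
        unsigned int state = 4;

        /* one call instead of printk(KERN_ERR ...); WARN_ON(1); */
        WARN(1, "%s timer stopped when its not running! state %u\n",
             __func__, state);
        return 0;
}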
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -718,16 +718,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
         if (ret)
                 goto done;
 
-        /*
-         * we ignore most issues after reporting them, but have to specially
-         * handle hardware-disabled chips.
-         */
-        if (ret == 2) {
-                /* unique error, known to ipath_init_one */
-                ret = -EPERM;
-                goto done;
-        }
-
         /*
          * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
          * but then it no longer nicely fits power of two, and since
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-        return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
+        return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
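With 64-byte CQEs on newer ConnectX hardware, sizeof(struct mlx4_cqe) (32 bytes) is no longer the ring stride, so the buffer records entry_size once and every lookup indexes by bytes. A freestanding sketch of stride-based ring indexing (struct cq_buf is hypothetical):

/* Sketch: indexing a CQ buffer by a runtime stride instead of sizeof().
 * Storing entry_size once (32 or 64, from device caps) lets every
 * lookup compute a plain byte offset.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cq_buf {
        uint8_t *base;
        int      entry_size;    /* 32 or 64, from device caps */
};

static void *get_cqe_from_buf(struct cq_buf *buf, int n)
{
        return buf->base + (size_t)n * buf->entry_size;
}

int main(void)
{
        struct cq_buf buf = { .entry_size = 64 };

        buf.base = calloc(8, buf.entry_size);   /* 8-entry CQ ring */
        if (!buf.base)
                return 1;
        printf("entry 3 at byte offset %td\n",
               (uint8_t *)get_cqe_from_buf(&buf, 3) - buf.base);
        free(buf.base);
        return 0;
}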
@@ -77,8 +77,9 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
 {
         struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+        struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
 
-        return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+        return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
 }
 
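get_sw_cqe() tests software ownership by XORing the CQE's owner bit against the wrap parity of the requested index; with 64-byte entries the hardware-written half sits in the second 32 bytes, hence the tcqe = cqe + 1 adjustment. A small sketch of the parity test, assuming a power-of-two ring where ibcq.cqe is size minus one:

/* Parity-based ownership test: after every full wrap of the ring the
 * expected owner bit flips, so a stale (hardware-owned) entry fails
 * the XOR test and polling returns NULL instead of a bogus CQE.
 */
#include <stdio.h>

#define OWNER_BIT       0x80
#define CQE_MASK        7       /* 8-entry ring: cq->ibcq.cqe == size - 1 */

static int sw_owned(unsigned char owner_sr_opcode, int n)
{
        return (!!(owner_sr_opcode & OWNER_BIT) ^
                !!(n & (CQE_MASK + 1))) ? 0 : 1;
}

int main(void)
{
        /* First pass (indices 0..7) expects owner bit 0,
         * second pass (8..15) expects owner bit 1.       */
        printf("n=3,  owner=0 -> %d\n", sw_owned(0x00, 3));       /* 1 */
        printf("n=11, owner=0 -> %d\n", sw_owned(0x00, 11));      /* 0 */
        printf("n=11, owner=1 -> %d\n", sw_owned(OWNER_BIT, 11)); /* 1 */
        return 0;
}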
@@ -99,12 +100,13 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 {
         int err;
 
-        err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+        err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
                              PAGE_SIZE * 2, &buf->buf);
 
         if (err)
                 goto out;
 
+        buf->entry_size = dev->dev->caps.cqe_size;
         err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                             &buf->mtt);
         if (err)
@@ -120,8 +122,7 @@ err_mtt:
         mlx4_mtt_cleanup(dev->dev, &buf->mtt);
 
 err_buf:
-        mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
-                      &buf->buf);
+        mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
 
 out:
         return err;
@@ -129,7 +130,7 @@ out:
 
 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
 {
-        mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+        mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
 }
 
 static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
@@ -137,8 +138,9 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
                                u64 buf_addr, int cqe)
 {
         int err;
+        int cqe_size = dev->dev->caps.cqe_size;
 
-        *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+        *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
                             IB_ACCESS_LOCAL_WRITE, 1);
         if (IS_ERR(*umem))
                 return PTR_ERR(*umem);
@@ -331,16 +333,23 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
 {
         struct mlx4_cqe *cqe, *new_cqe;
         int i;
+        int cqe_size = cq->buf.entry_size;
+        int cqe_inc = cqe_size == 64 ? 1 : 0;
 
         i = cq->mcq.cons_index;
         cqe = get_cqe(cq, i & cq->ibcq.cqe);
+        cqe += cqe_inc;
+
         while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                 new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
                                            (i + 1) & cq->resize_buf->cqe);
-                memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+                memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
+                new_cqe += cqe_inc;
+
                 new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
                         (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
                 cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+                cqe += cqe_inc;
         }
         ++cq->mcq.cons_index;
 }
@@ -438,6 +447,7 @@ err_buf:
 
 out:
         mutex_unlock(&cq->resize_mutex);
+
         return err;
 }
 
@@ -586,6 +596,9 @@ repoll:
         if (!cqe)
                 return -EAGAIN;
 
+        if (cq->buf.entry_size == 64)
+                cqe++;
+
         ++cq->mcq.cons_index;
 
         /*
@@ -807,6 +820,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
         int nfreed = 0;
         struct mlx4_cqe *cqe, *dest;
         u8 owner_bit;
+        int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
 
         /*
          * First we need to find the current producer index, so we
@@ -825,12 +839,16 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
          */
         while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+                cqe += cqe_inc;
+
                 if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                         if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                         ++nfreed;
                 } else if (nfreed) {
                         dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+                        dest += cqe_inc;
+
                         owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                         memcpy(dest, cqe, sizeof *cqe);
                         dest->owner_sr_opcode = owner_bit |
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,15 +563,24 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 {
         struct mlx4_ib_dev *dev = to_mdev(ibdev);
         struct mlx4_ib_ucontext *context;
+        struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
         struct mlx4_ib_alloc_ucontext_resp resp;
         int err;
 
         if (!dev->ib_active)
                 return ERR_PTR(-EAGAIN);
 
-        resp.qp_tab_size      = dev->dev->caps.num_qps;
-        resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
-        resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+        if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
+                resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
+                resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
+                resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+        } else {
+                resp.dev_caps         = dev->dev->caps.userspace_caps;
+                resp.qp_tab_size      = dev->dev->caps.num_qps;
+                resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
+                resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+                resp.cqe_size         = dev->dev->caps.cqe_size;
+        }
 
         context = kmalloc(sizeof *context, GFP_KERNEL);
         if (!context)
@@ -586,7 +595,11 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
         INIT_LIST_HEAD(&context->db_page_list);
         mutex_init(&context->db_page_mutex);
 
-        err = ib_copy_to_udata(udata, &resp, sizeof resp);
+        if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+                err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
+        else
+                err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+
         if (err) {
                 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                 kfree(context);
@@ -1342,7 +1355,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
         ibdev->ib_dev.dma_device        = &dev->pdev->dev;
 
-        ibdev->ib_dev.uverbs_abi_ver    = MLX4_IB_UVERBS_ABI_VERSION;
+        if (dev->caps.userspace_caps)
+                ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+        else
+                ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
+
         ibdev->ib_dev.uverbs_cmd_mask   =
                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,6 +90,7 @@ struct mlx4_ib_xrcd {
 struct mlx4_ib_cq_buf {
         struct mlx4_buf         buf;
         struct mlx4_mtt         mtt;
+        int                     entry_size;
 };
 
 struct mlx4_ib_cq_resize {
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -40,7 +40,9 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MLX4_IB_UVERBS_ABI_VERSION      3
+
+#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION  3
+#define MLX4_IB_UVERBS_ABI_VERSION              4
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -50,12 +52,20 @@
  * instead.
  */
 
+struct mlx4_ib_alloc_ucontext_resp_v3 {
+        __u32   qp_tab_size;
+        __u16   bf_reg_size;
+        __u16   bf_regs_per_page;
+};
+
 struct mlx4_ib_alloc_ucontext_resp {
+        __u32   dev_caps;
         __u32   qp_tab_size;
         __u16   bf_reg_size;
         __u16   bf_regs_per_page;
+        __u32   cqe_size;
 };
 
 struct mlx4_ib_alloc_pd_resp {
         __u32   pdn;
         __u32   reserved;
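The uapi change freezes the old three-field response as mlx4_ib_alloc_ucontext_resp_v3 and extends the v4 struct with dev_caps and cqe_size; main.c (above) then copies out whichever layout matches the negotiated ABI version. A userspace sketch of that version-switched marshalling, with a plain memcpy standing in for ib_copy_to_udata() and made-up capability values:

/* Version-dispatched response marshalling: old consumers (ABI 3) get
 * the short struct, new ones (ABI 4) the extended one. Layouts mirror
 * the uapi structs above; all numeric values are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct alloc_ucontext_resp_v3 {
        uint32_t qp_tab_size;
        uint16_t bf_reg_size;
        uint16_t bf_regs_per_page;
};

struct alloc_ucontext_resp {
        uint32_t dev_caps;
        uint32_t qp_tab_size;
        uint16_t bf_reg_size;
        uint16_t bf_regs_per_page;
        uint32_t cqe_size;
};

static size_t copy_to_udata(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);          /* stands in for ib_copy_to_udata() */
        return len;
}

static size_t fill_resp(int abi_ver, void *udata)
{
        if (abi_ver == 3) {     /* MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION */
                struct alloc_ucontext_resp_v3 r3 = {
                        .qp_tab_size = 65536, .bf_reg_size = 512,
                        .bf_regs_per_page = 4,
                };
                return copy_to_udata(udata, &r3, sizeof(r3));
        } else {                /* MLX4_IB_UVERBS_ABI_VERSION (4) */
                struct alloc_ucontext_resp r = {
                        .dev_caps = 1, .qp_tab_size = 65536,
                        .bf_reg_size = 512, .bf_regs_per_page = 4,
                        .cqe_size = 64,
                };
                return copy_to_udata(udata, &r, sizeof(r));
        }
}

int main(void)
{
        unsigned char buf[64];

        printf("v3 copies %zu bytes, v4 copies %zu bytes\n",
               fill_resp(3, buf), fill_resp(4, buf));
        return 0;
}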
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -629,11 +629,9 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a
 
         case SEND_RDMA_READ_ZERO:
         default:
-                if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) {
-                        printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n",
-                               __func__, __LINE__, cm_node->send_rdma0_op);
-                        WARN_ON(1);
-                }
+                if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO)
+                        WARN(1, "Unsupported RDMA0 len operation=%u\n",
+                             cm_node->send_rdma0_op);
                 nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
                 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
                         cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -210,6 +210,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
         }
 
         while (1) {
+                if (skb_queue_empty(&nesqp->pau_list))
+                        goto out;
+
                 seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
                 if (seq == nextseq) {
                         if (skb->len || processacks)
@@ -218,14 +221,13 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
                         goto out;
                 }
 
-                if (skb->next == (struct sk_buff *)&nesqp->pau_list)
-                        goto out;
-
                 old_skb = skb;
                 skb = skb->next;
                 skb_unlink(old_skb, &nesqp->pau_list);
                 nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
                 nes_rem_ref_cm_node(nesqp->cm_node);
+                if (skb == (struct sk_buff *)&nesqp->pau_list)
+                        goto out;
         }
         return skb;
 
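The loop now advances and unlinks first and only then compares against the list head, and the skb_queue_empty() check added in the previous hunk covers the list-emptied-under-us case, so the walk can no longer treat the head sentinel as an skb. A doubly-linked-ring sketch of check-after-advance (types are illustrative):

/* Check-after-advance traversal of a circular list with a head
 * sentinel, mirroring how sk_buff queues link back to the list head.
 */
#include <stdio.h>

struct node {
        struct node *next;
        int val;
};

int main(void)
{
        struct node head = { &head, -1 };               /* sentinel */
        struct node c = { &head, 3 }, b = { &c, 2 }, a = { &b, 1 };

        head.next = &a;
        for (struct node *n = head.next; ; ) {
                printf("visit %d\n", n->val);
                n = n->next;                    /* advance first ... */
                if (n == &head)                 /* ... then test for head */
                        break;
        }
        return 0;
}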
@@ -245,7 +247,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
         struct nes_rskb_cb *cb;
         struct pau_fpdu_info *fpdu_info = NULL;
         struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
-        unsigned long flags;
         u32 fpdu_len = 0;
         u32 tmp_len;
         int frag_cnt = 0;
@@ -260,12 +261,10 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
 
         *pau_fpdu_info = NULL;
 
-        spin_lock_irqsave(&nesqp->pau_lock, flags);
         skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
-        if (!skb) {
-                spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+        if (!skb)
                 goto out;
-        }
+
         cb = (struct nes_rskb_cb *)&skb->cb[0];
         if (skb->len) {
                 fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
@@ -290,10 +289,9 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
 
                 skb = nes_get_next_skb(nesdev, nesqp, skb,
                                        nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
-                if (!skb) {
-                        spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+                if (!skb)
                         goto out;
-                } else if (rst_rcvd) {
+                if (rst_rcvd) {
                         /* rst received in the middle of fpdu */
                         for (; i >= 0; i--) {
                                 skb_unlink(frags[i].skb, &nesqp->pau_list);
@@ -320,8 +318,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
                 frag_cnt = 1;
         }
 
-        spin_unlock_irqrestore(&nesqp->pau_lock, flags);
-
         /* Found one */
         fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
         if (fpdu_info == NULL) {
@@ -383,9 +379,8 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
 
                 if (frags[i].skb->len == 0) {
                         /* Pull skb off the list - it will be freed in the callback */
-                        spin_lock_irqsave(&nesqp->pau_lock, flags);
-                        skb_unlink(frags[i].skb, &nesqp->pau_list);
-                        spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+                        if (!skb_queue_empty(&nesqp->pau_list))
+                                skb_unlink(frags[i].skb, &nesqp->pau_list);
                 } else {
                         /* Last skb still has data so update the seq */
                         iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
@@ -414,14 +409,18 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
         struct pau_fpdu_info *fpdu_info;
         struct nes_hw_cqp_wqe *cqp_wqe;
         struct nes_cqp_request *cqp_request;
+        unsigned long flags;
         u64 u64tmp;
         u32 u32tmp;
         int rc;
 
         while (1) {
+                spin_lock_irqsave(&nesqp->pau_lock, flags);
                 rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
-                if (fpdu_info == NULL)
+                if (rc || (fpdu_info == NULL)) {
+                        spin_unlock_irqrestore(&nesqp->pau_lock, flags);
                         return rc;
+                }
 
                 cqp_request = fpdu_info->cqp_request;
                 cqp_wqe = &cqp_request->cqp_wqe;
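This hunk is the other half of the get_fpdu_info() changes above: pau_lock moves out of the helper and into forward_fpdus(), so the queue walk and the subsequent consumption happen under one critical section instead of two. A compact pthread sketch of the same lock-scope refactor (all names hypothetical):

/* Lock-scope refactor: instead of the helper taking and dropping the
 * lock internally (leaving the caller to race on the result), the
 * caller holds the lock across the helper and the follow-up work.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pau_lock = PTHREAD_MUTEX_INITIALIZER;
static int pau_list_len = 2;            /* stand-in for the skb queue */

static int get_fpdu_info(void)          /* caller must hold pau_lock */
{
        return pau_list_len > 0 ? 0 : -11 /* -EAGAIN */;
}

static int forward_fpdus(void)
{
        while (1) {
                pthread_mutex_lock(&pau_lock);
                if (get_fpdu_info()) {
                        pthread_mutex_unlock(&pau_lock);
                        return 0;       /* nothing left to forward */
                }
                pau_list_len--;         /* consume under the same lock */
                pthread_mutex_unlock(&pau_lock);
        }
}

int main(void)
{
        forward_fpdus();
        printf("queue drained, %d left\n", pau_list_len);
        return 0;
}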
@@ -447,7 +446,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
                 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
                                     lower_32_bits(u64tmp));
                 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
-                                    upper_32_bits(u64tmp >> 32));
+                                    upper_32_bits(u64tmp));
 
                 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
                                     lower_32_bits(fpdu_info->frags[0].physaddr));
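The one-liner fixes a double shift: upper_32_bits() already shifts its argument right by 32, so passing u64tmp >> 32 extracted bits 95..64, which are always zero. A quick userspace check (the macros are simplified re-definitions of the kernel's):

/* Why upper_32_bits(u64tmp >> 32) was wrong: the macro itself shifts
 * by 32, so the pre-shifted argument yields 0 instead of bits 63..32.
 */
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
        uint64_t addr = 0x123456789abcdef0ULL;

        printf("buggy: 0x%x\n", upper_32_bits(addr >> 32)); /* 0x0 */
        printf("fixed: 0x%x\n", upper_32_bits(addr));       /* 0x12345678 */
        printf("low:   0x%x\n", lower_32_bits(addr));       /* 0x9abcdef0 */
        return 0;
}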
@@ -475,6 +474,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
 
                 atomic_set(&cqp_request->refcount, 1);
                 nes_post_cqp_request(nesdev, cqp_request);
+                spin_unlock_irqrestore(&nesqp->pau_lock, flags);
         }
 
         return 0;
@@ -649,11 +649,9 @@ static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request
         nesqp = qh_chg->nesqp;
 
         /* Should we handle the bad completion */
-        if (cqp_request->major_code) {
-                printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n",
+        if (cqp_request->major_code)
+                WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
                      cqp_request->major_code);
-                WARN_ON(1);
-        }
 
         switch (nesqp->pau_state) {
         case PAU_DEL_QH:
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -944,12 +944,13 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
                                             addr,
                                             perfect_filter_register_address+(mc_index * 8),
                                             mc_nic_index);
-                        macaddr_high  = ((u16) addr[0]) << 8;
-                        macaddr_high += (u16) addr[1];
-                        macaddr_low   = ((u32) addr[2]) << 24;
-                        macaddr_low  += ((u32) addr[3]) << 16;
-                        macaddr_low  += ((u32) addr[4]) << 8;
-                        macaddr_low  += (u32) addr[5];
+                        macaddr_high  = ((u8) addr[0]) << 8;
+                        macaddr_high += (u8) addr[1];
+                        macaddr_low   = ((u8) addr[2]) << 24;
+                        macaddr_low  += ((u8) addr[3]) << 16;
+                        macaddr_low  += ((u8) addr[4]) << 8;
+                        macaddr_low  += (u8) addr[5];
+
                         nes_write_indexed(nesdev,
                                 perfect_filter_register_address+(mc_index * 8),
                                 macaddr_low);
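The cast cleanup drops the pointless widening: addr[] already holds bytes, and C's integer promotions widen each (u8) operand to int before the shifts, so the packed register words are bit-for-bit identical. A quick check of the packing with a made-up multicast MAC:

/* Packing a 6-byte multicast MAC into the two filter-register words;
 * (u8) casts promote to int before <<, so no bits are lost.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };
        uint16_t macaddr_high;
        uint32_t macaddr_low;

        macaddr_high  = ((uint8_t) addr[0]) << 8;
        macaddr_high += (uint8_t) addr[1];
        macaddr_low   = ((uint8_t) addr[2]) << 24;
        macaddr_low  += ((uint8_t) addr[3]) << 16;
        macaddr_low  += ((uint8_t) addr[4]) << 8;
        macaddr_low  += (uint8_t) addr[5];

        printf("high=0x%04x low=0x%08x\n", macaddr_high, macaddr_low);
        /* expect high=0x0100 low=0x5e123456 */
        return 0;
}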