ehea/ibm*: Move the IBM drivers
Move the IBM drivers into drivers/net/ethernet/ibm/ and make the
necessary Kconfig and Makefile changes.

- Renamed ibm_new_emac to emac
- Cleaned up Makefile and Kconfig options which referred to
  IBM_NEW_EMAC so that they now refer to IBM_EMAC
- The ibmlana driver is a National Semiconductor SONIC driver, so it
  was not moved

CC: Christoph Raisch <raisch@de.ibm.com>
CC: Santiago Leon <santil@linux.vnet.ibm.com>
CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: David Gibson <dwg@au1.ibm.com>
CC: Kyle Lucke <klucke@us.ibm.com>
CC: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
This commit is contained in:
6
drivers/net/ethernet/ibm/ehea/Makefile
Normal file
6
drivers/net/ethernet/ibm/ehea/Makefile
Normal file
@@ -0,0 +1,6 @@
|
||||
#
# Makefile for the eHEA ethernet device driver for IBM eServer System p
#

# Each object must be listed exactly once: the original listed
# ehea_phyp.o twice, which only linked cleanly because make's $^
# de-duplicates prerequisites.
ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o

obj-$(CONFIG_EHEA) += ehea.o
|
||||
|
504
drivers/net/ethernet/ibm/ehea/ehea.h
Normal file
504
drivers/net/ethernet/ibm/ehea/ehea.h
Normal file
@@ -0,0 +1,504 @@
|
||||
/*
|
||||
* linux/drivers/net/ethernet/ibm/ehea/ehea.h
|
||||
*
|
||||
* eHEA ethernet device driver for IBM eServer System p
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2006
|
||||
*
|
||||
* Authors:
|
||||
* Christoph Raisch <raisch@de.ibm.com>
|
||||
* Jan-Bernd Themann <themann@de.ibm.com>
|
||||
* Thomas Klein <tklein@de.ibm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#ifndef __EHEA_H__
#define __EHEA_H__

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>

#define DRV_NAME	"ehea"
#define DRV_VERSION	"EHEA_0107"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD      2
#define DLPAR_MEM_REM      4
#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Hardware limits for the number of queue entries */
#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127

#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1
#define EHEA_LRO_MAX_AGGR 64

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT      1023
#define EHEA_DEF_ENTRIES_SQ     1023
#define EHEA_DEF_ENTRIES_RQ1    4095
#define EHEA_DEF_ENTRIES_RQ2    1023
#define EHEA_DEF_ENTRIES_RQ3    1023
#else
#define EHEA_MAX_CQE_COUNT      4080
#define EHEA_DEF_ENTRIES_SQ     4080
#define EHEA_DEF_ENTRIES_RQ1    8160
#define EHEA_DEF_ENTRIES_RQ2    2040
#define EHEA_DEF_ENTRIES_RQ3    2040
#endif

#define EHEA_MAX_ENTRIES_EQ 20

/* scatter/gather entries per WQE type */
#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

#define EHEA_MAX_PACKET_SIZE    9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE       1522
#define EHEA_L_PKT_SIZE         256	/* low latency */

#define MAX_LRO_DESCRIPTORS 8

/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID        0xaabcdeff

#define EHEA_RQ2_THRESHOLD	   1
#define EHEA_RQ3_THRESHOLD	   9	/* use RQ3 threshold of 1522 bytes */

#define EHEA_SPEED_10G         10000
#define EHEA_SPEED_1G           1000
#define EHEA_SPEED_100M          100
#define EHEA_SPEED_10M            10
#define EHEA_SPEED_AUTONEG         0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL	0x08
#define EHEA_BCMC_SCOPE_SINGLE	0x00
#define EHEA_BCMC_MULTICAST	0x04
#define EHEA_BCMC_BROADCAST	0x00
#define EHEA_BCMC_UNTAGGED	0x02
#define EHEA_BCMC_TAGGED	0x00
#define EHEA_BCMC_VLANID_ALL	0x01
#define EHEA_BCMC_VLANID_SINGLE	0x00

#define EHEA_CACHE_LINE          128

/* Memory Regions */
#define EHEA_MR_ACC_CTRL       0x00800000

#define EHEA_BUSMAP_START      0x8000000000000000ULL
#define EHEA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE (0x10000)                   /* currently fixed map size */
#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)


/* Parenthesized so the expansion is safe inside larger expressions
 * (the original "10*HZ" could bind unexpectedly, e.g. after a '/'). */
#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)

/* utility functions */

void ehea_dump(void *adr, int len, char *msg);

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

/* Arguments parenthesized so expression operands expand safely
 * (the original left "to" bare in "63 - to"). */
#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))

/*
 * Generic ehea page
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};

/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;
};

struct h_epa_user {
	u64 addr;
};

struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource
				   set to 0 if unused */
};

/*
 * Memory map data structures
 */
struct ehea_dir_bmap {
	u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap {
	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap {
	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};

struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;

/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;		/* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;	/* cqe generation flag */
	u8 rq_count;		/* num of receive queues */
	u8 eqe_gen;		/* eqe generation flag */
	u16 max_nr_send_wqes;	/* max number of send wqes */
	u16 max_nr_rwqes_rq1;	/* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;	/* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};

/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;		/* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;
	u32 nr_pages;
	u32 ist1;		/* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};


/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;
	struct hw_queue hw_queue;
	u64 fw_handle;
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;
	u64 fw_handle;			/* QP handle for firmware calls */
	struct hw_queue hw_squeue;
	struct hw_queue hw_rqueue1;
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;
};

/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;
	u64 fw_handle;
	struct hw_queue hw_queue;
	struct h_epas epas;
	struct ehea_cq_attr attr;
};

/*
 * Memory Region
 */
struct ehea_mr {
	struct ehea_adapter *adapter;
	u64 handle;
	u64 vaddr;
	u32 lkey;
};

/*
 * Port state information
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;		/* skb array for queue */
	int len;			/* array length */
	int index;			/* array index */
	int os_skbs;			/* rq2/rq3 only: outstanding skbs */
};

/*
 * Port resources
 */
struct ehea_port_res {
	struct napi_struct napi;
	struct port_stats p_stats;
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	spinlock_t xmit_lock;
	struct ehea_port *port;
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *eq;
	struct ehea_q_skb_arr rq1_skba;
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	int sq_skba_size;
	spinlock_t netif_queue;
	int queue_stopped;
	int swqe_refill_th;
	atomic_t swqe_avail;
	int swqe_ll_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	u64 tx_bytes;
	u64 rx_packets;
	u64 rx_bytes;
	u32 poll_counter;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
	int sq_restart_flag;
};


#define EHEA_MAX_PORTS 16

#define EHEA_NUM_PORTRES_FW_HANDLES    6  /* QP handle, SendCQ handle,
					     RecvCQ handle, EQ handle,
					     SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES       1  /* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES    2  /* MR handle, NEQ handle */

struct ehea_adapter {
	u64 handle;
	struct platform_device *ofdev;
	struct ehea_port *port[EHEA_MAX_PORTS];
	struct ehea_eq *neq;		/* notification event queue */
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;			/* protection domain */
	u64 max_mc_mac;		/* max number of multicast mac addresses */
	int active_ports;
	struct list_head list;
};


struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};

/* kdump support */
struct ehea_fw_handle_entry {
	u64 adh;		/* Adapter Handle */
	u64 fwh;		/* Firmware Handle */
};

struct ehea_fw_handle_array {
	struct ehea_fw_handle_entry *arr;
	int num_entries;
	struct mutex lock;
};

struct ehea_bcmc_reg_entry {
	u64 adh;		/* Adapter Handle */
	u32 port_id;		/* Logical Port Id */
	u8 reg_type;		/* Registration Type */
	u64 macaddr;
};

struct ehea_bcmc_reg_array {
	struct ehea_bcmc_reg_entry *arr;
	int num_entries;
	spinlock_t lock;
};

#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
#define EHEA_PHY_LINK_UP 1
#define EHEA_PHY_LINK_DOWN 0
#define EHEA_MAX_PORT_RES 16
struct ehea_port {
	struct ehea_adapter *adapter;	/* adapter that owns this port */
	struct net_device *netdev;
	struct net_device_stats stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct platform_device ofdev;	/* Open Firmware Device */
	struct ehea_mc_list *mc_list;	/* Multicast MAC addresses */
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct mutex port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			/* Indicates IFF_ALLMULTI state */
	int promisc;			/* Indicates IFF_PROMISC state */
	int num_tx_qps;
	int num_add_tx_qps;
	int num_mcs;
	int resets;
	unsigned long flags;
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;
	u32 lro_max_aggr;
	u8 phy_link;
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
	wait_queue_head_t swqe_avail_wq;
	wait_queue_head_t restart_wq;
};

struct port_res_cfg {
	int max_entries_rcq;
	int max_entries_scq;
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};

enum ehea_flag_bits {
	__EHEA_STOP_XFER,
	__EHEA_DISABLE_PORT_RESET
};

void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

#endif	/* __EHEA_H__ */
|
295
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
Normal file
295
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
Normal file
@@ -0,0 +1,295 @@
|
||||
/*
|
||||
* linux/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
|
||||
*
|
||||
* eHEA ethernet device driver for IBM eServer System p
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2006
|
||||
*
|
||||
* Authors:
|
||||
* Christoph Raisch <raisch@de.ibm.com>
|
||||
* Jan-Bernd Themann <themann@de.ibm.com>
|
||||
* Thomas Klein <tklein@de.ibm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include "ehea.h"
|
||||
#include "ehea_phyp.h"
|
||||
|
||||
/* ethtool .get_settings callback: report the current link speed/duplex
 * and fill in supported/advertised modes (fibre for 10G, TP otherwise). */
static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ehea_port *port = netdev_priv(dev);
	u32 speed;
	int ret;

	/* refresh the cached port attributes from firmware first */
	ret = ehea_sense_port_attr(port);

	if (ret)
		return ret;

	if (netif_carrier_ok(dev)) {
		/* map the eHEA speed code to the generic SPEED_* value */
		switch (port->port_speed) {
		case EHEA_SPEED_10M:
			speed = SPEED_10;
			break;
		case EHEA_SPEED_100M:
			speed = SPEED_100;
			break;
		case EHEA_SPEED_1G:
			speed = SPEED_1000;
			break;
		case EHEA_SPEED_10G:
			speed = SPEED_10000;
			break;
		default:
			speed = -1;
			break; /* BUG */
		}
		cmd->duplex = port->full_duplex == 1 ?
						     DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* link is down: speed and duplex are unknown */
		speed = ~0;
		cmd->duplex = -1;
	}
	ethtool_cmd_speed_set(cmd, speed);

	if (cmd->speed == SPEED_10000) {
		cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
		cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
			       | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
			       | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
			       | SUPPORTED_TP);
		cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
			       | ADVERTISED_TP);
		cmd->port = PORT_TP;
	}

	cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
|
||||
|
||||
static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
||||
{
|
||||
struct ehea_port *port = netdev_priv(dev);
|
||||
int ret = 0;
|
||||
u32 sp;
|
||||
|
||||
if (cmd->autoneg == AUTONEG_ENABLE) {
|
||||
sp = EHEA_SPEED_AUTONEG;
|
||||
goto doit;
|
||||
}
|
||||
|
||||
switch (cmd->speed) {
|
||||
case SPEED_10:
|
||||
if (cmd->duplex == DUPLEX_FULL)
|
||||
sp = H_SPEED_10M_F;
|
||||
else
|
||||
sp = H_SPEED_10M_H;
|
||||
break;
|
||||
|
||||
case SPEED_100:
|
||||
if (cmd->duplex == DUPLEX_FULL)
|
||||
sp = H_SPEED_100M_F;
|
||||
else
|
||||
sp = H_SPEED_100M_H;
|
||||
break;
|
||||
|
||||
case SPEED_1000:
|
||||
if (cmd->duplex == DUPLEX_FULL)
|
||||
sp = H_SPEED_1G_F;
|
||||
else
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
|
||||
case SPEED_10000:
|
||||
if (cmd->duplex == DUPLEX_FULL)
|
||||
sp = H_SPEED_10G_F;
|
||||
else
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
doit:
|
||||
ret = ehea_set_portspeed(port, sp);
|
||||
|
||||
if (!ret)
|
||||
netdev_info(dev,
|
||||
"Port speed successfully set: %dMbps %s Duplex\n",
|
||||
port->port_speed,
|
||||
port->full_duplex == 1 ? "Full" : "Half");
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ehea_nway_reset(struct net_device *dev)
|
||||
{
|
||||
struct ehea_port *port = netdev_priv(dev);
|
||||
int ret;
|
||||
|
||||
ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
|
||||
|
||||
if (!ret)
|
||||
netdev_info(port->netdev,
|
||||
"Port speed successfully set: %dMbps %s Duplex\n",
|
||||
port->port_speed,
|
||||
port->full_duplex == 1 ? "Full" : "Half");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* ethtool .get_drvinfo callback: report driver name and version. */
static void ehea_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
|
||||
|
||||
/* ethtool .get_msglevel callback: return the per-port NETIF_MSG_* mask. */
static u32 ehea_get_msglevel(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	return port->msg_enable;
}
|
||||
|
||||
/* ethtool .set_msglevel callback: store the per-port NETIF_MSG_* mask. */
static void ehea_set_msglevel(struct net_device *dev, u32 value)
{
	struct ehea_port *port = netdev_priv(dev);
	port->msg_enable = value;
}
|
||||
|
||||
/* Statistic names reported for ETH_SS_STATS.  The order here must match
 * the order in which ehea_get_ethtool_stats() fills the data array. */
static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"sig_comp_iv"},
	{"swqe_refill_th"},
	{"port resets"},
	{"Receive errors"},
	{"TCP cksum errors"},
	{"IP cksum errors"},
	{"Frame cksum errors"},
	{"num SQ stopped"},
	{"SQ stopped"},
	{"PR0 free_swqes"},
	{"PR1 free_swqes"},
	{"PR2 free_swqes"},
	{"PR3 free_swqes"},
	{"PR4 free_swqes"},
	{"PR5 free_swqes"},
	{"PR6 free_swqes"},
	{"PR7 free_swqes"},
	{"LRO aggregated"},
	{"LRO flushed"},
	{"LRO no_desc"},
};
|
||||
|
||||
/* ethtool .get_strings callback: copy out the statistic names for the
 * ETH_SS_STATS string set; other sets are left untouched. */
static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, ehea_ethtool_stats_keys,
	       sizeof(ehea_ethtool_stats_keys));
}
|
||||
|
||||
static int ehea_get_sset_count(struct net_device *dev, int sset)
|
||||
{
|
||||
switch (sset) {
|
||||
case ETH_SS_STATS:
|
||||
return ARRAY_SIZE(ehea_ethtool_stats_keys);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
/* ethtool .get_ethtool_stats callback: aggregate per-port-resource
 * counters into the flat data[] array, in the exact order declared in
 * ehea_ethtool_stats_keys[]. */
static void ehea_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	int i, k, tmp;
	u64 lro_tmp;
	struct ehea_port *port = netdev_priv(dev);

	for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++)
		data[i] = 0;
	i = 0;

	data[i++] = port->sig_comp_iv;
	data[i++] = port->port_res[0].swqe_refill_th;
	data[i++] = port->resets;

	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port->port_res[k].p_stats.poll_receive_errors;
	data[i++] = tmp;

	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port->port_res[k].p_stats.err_tcp_cksum;
	data[i++] = tmp;

	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port->port_res[k].p_stats.err_ip_cksum;
	data[i++] = tmp;

	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port->port_res[k].p_stats.err_frame_crc;
	data[i++] = tmp;

	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port->port_res[k].p_stats.queue_stopped;
	data[i++] = tmp;

	/* "SQ stopped" is a flag, so OR-ing across port resources is right */
	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp |= port->port_res[k].queue_stopped;
	data[i++] = tmp;

	for (k = 0; k < 8; k++)
		data[i++] = atomic_read(&port->port_res[k].swqe_avail);

	/* The LRO numbers are counters, not flags: they must be summed
	 * across port resources.  The original code OR-ed them ("tmp |="),
	 * which produces a meaningless bit soup instead of a total. */
	for (k = 0, lro_tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		lro_tmp += port->port_res[k].lro_mgr.stats.aggregated;
	data[i++] = lro_tmp;

	for (k = 0, lro_tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		lro_tmp += port->port_res[k].lro_mgr.stats.flushed;
	data[i++] = lro_tmp;

	for (k = 0, lro_tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		lro_tmp += port->port_res[k].lro_mgr.stats.no_desc;
	data[i++] = lro_tmp;

}
|
||||
|
||||
/* ethtool operations table for the eHEA driver. */
const struct ethtool_ops ehea_ethtool_ops = {
	.get_settings = ehea_get_settings,
	.get_drvinfo = ehea_get_drvinfo,
	.get_msglevel = ehea_get_msglevel,
	.set_msglevel = ehea_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = ehea_get_strings,
	.get_sset_count = ehea_get_sset_count,
	.get_ethtool_stats = ehea_get_ethtool_stats,
	.set_settings = ehea_set_settings,
	.nway_reset = ehea_nway_reset,		/* Restart autonegotiation */
};
|
||||
|
||||
/* Attach the eHEA ethtool operations table to a net device. */
void ehea_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
}
|
292
drivers/net/ethernet/ibm/ehea/ehea_hw.h
Normal file
292
drivers/net/ethernet/ibm/ehea/ehea_hw.h
Normal file
@@ -0,0 +1,292 @@
|
||||
/*
|
||||
* linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
|
||||
*
|
||||
* eHEA ethernet device driver for IBM eServer System p
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2006
|
||||
*
|
||||
* Authors:
|
||||
* Christoph Raisch <raisch@de.ibm.com>
|
||||
* Jan-Bernd Themann <themann@de.ibm.com>
|
||||
* Thomas Klein <tklein@de.ibm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#ifndef __EHEA_HW_H__
|
||||
#define __EHEA_HW_H__
|
||||
|
||||
#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
|
||||
#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
|
||||
#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
|
||||
#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
|
||||
|
||||
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
|
||||
|
||||
struct ehea_qptemm {
|
||||
u64 qpx_hcr;
|
||||
u64 qpx_c;
|
||||
u64 qpx_herr;
|
||||
u64 qpx_aer;
|
||||
u64 qpx_sqa;
|
||||
u64 qpx_sqc;
|
||||
u64 qpx_rq1a;
|
||||
u64 qpx_rq1c;
|
||||
u64 qpx_st;
|
||||
u64 qpx_aerr;
|
||||
u64 qpx_tenure;
|
||||
u64 qpx_reserved1[(0x098 - 0x058) / 8];
|
||||
u64 qpx_portp;
|
||||
u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
|
||||
u64 qpx_t;
|
||||
u64 qpx_sqhp;
|
||||
u64 qpx_sqptp;
|
||||
u64 qpx_reserved3[(0x140 - 0x118) / 8];
|
||||
u64 qpx_sqwsize;
|
||||
u64 qpx_reserved4[(0x170 - 0x148) / 8];
|
||||
u64 qpx_sqsize;
|
||||
u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
|
||||
u64 qpx_sigt;
|
||||
u64 qpx_wqecnt;
|
||||
u64 qpx_rq1hp;
|
||||
u64 qpx_rq1ptp;
|
||||
u64 qpx_rq1size;
|
||||
u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
|
||||
u64 qpx_rq1wsize;
|
||||
u64 qpx_reserved7[(0x240 - 0x228) / 8];
|
||||
u64 qpx_pd;
|
||||
u64 qpx_scqn;
|
||||
u64 qpx_rcqn;
|
||||
u64 qpx_aeqn;
|
||||
u64 reserved49;
|
||||
u64 qpx_ram;
|
||||
u64 qpx_reserved8[(0x300 - 0x270) / 8];
|
||||
u64 qpx_rq2a;
|
||||
u64 qpx_rq2c;
|
||||
u64 qpx_rq2hp;
|
||||
u64 qpx_rq2ptp;
|
||||
u64 qpx_rq2size;
|
||||
u64 qpx_rq2wsize;
|
||||
u64 qpx_rq2th;
|
||||
u64 qpx_rq3a;
|
||||
u64 qpx_rq3c;
|
||||
u64 qpx_rq3hp;
|
||||
u64 qpx_rq3ptp;
|
||||
u64 qpx_rq3size;
|
||||
u64 qpx_rq3wsize;
|
||||
u64 qpx_rq3th;
|
||||
u64 qpx_lpn;
|
||||
u64 qpx_reserved9[(0x400 - 0x378) / 8];
|
||||
u64 reserved_ext[(0x500 - 0x400) / 8];
|
||||
u64 reserved2[(0x1000 - 0x500) / 8];
|
||||
};
|
||||
|
||||
#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)
|
||||
|
||||
#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)
|
||||
|
||||
struct ehea_mrmwmm {
|
||||
u64 mrx_hcr;
|
||||
u64 mrx_c;
|
||||
u64 mrx_herr;
|
||||
u64 mrx_aer;
|
||||
u64 mrx_pp;
|
||||
u64 reserved1;
|
||||
u64 reserved2;
|
||||
u64 reserved3;
|
||||
u64 reserved4[(0x200 - 0x40) / 8];
|
||||
u64 mrx_ctl[64];
|
||||
};
|
||||
|
||||
#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)
|
||||
|
||||
struct ehea_qpedmm {
|
||||
|
||||
u64 reserved0[(0x400) / 8];
|
||||
u64 qpedx_phh;
|
||||
u64 qpedx_ppsgp;
|
||||
u64 qpedx_ppsgu;
|
||||
u64 qpedx_ppdgp;
|
||||
u64 qpedx_ppdgu;
|
||||
u64 qpedx_aph;
|
||||
u64 qpedx_apsgp;
|
||||
u64 qpedx_apsgu;
|
||||
u64 qpedx_apdgp;
|
||||
u64 qpedx_apdgu;
|
||||
u64 qpedx_apav;
|
||||
u64 qpedx_apsav;
|
||||
u64 qpedx_hcr;
|
||||
u64 reserved1[4];
|
||||
u64 qpedx_rrl0;
|
||||
u64 qpedx_rrrkey0;
|
||||
u64 qpedx_rrva0;
|
||||
u64 reserved2;
|
||||
u64 qpedx_rrl1;
|
||||
u64 qpedx_rrrkey1;
|
||||
u64 qpedx_rrva1;
|
||||
u64 reserved3;
|
||||
u64 qpedx_rrl2;
|
||||
u64 qpedx_rrrkey2;
|
||||
u64 qpedx_rrva2;
|
||||
u64 reserved4;
|
||||
u64 qpedx_rrl3;
|
||||
u64 qpedx_rrrkey3;
|
||||
u64 qpedx_rrva3;
|
||||
};
|
||||
|
||||
#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
|
||||
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
|
||||
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
|
||||
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)
|
||||
|
||||
#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)
|
||||
|
||||
struct ehea_cqtemm {
|
||||
u64 cqx_hcr;
|
||||
u64 cqx_c;
|
||||
u64 cqx_herr;
|
||||
u64 cqx_aer;
|
||||
u64 cqx_ptp;
|
||||
u64 cqx_tp;
|
||||
u64 cqx_fec;
|
||||
u64 cqx_feca;
|
||||
u64 cqx_ep;
|
||||
u64 cqx_eq;
|
||||
u64 reserved1;
|
||||
u64 cqx_n0;
|
||||
u64 cqx_n1;
|
||||
u64 reserved2[(0x1000 - 0x60) / 8];
|
||||
};
|
||||
|
||||
#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)
|
||||
|
||||
struct ehea_eqtemm {
|
||||
u64 eqx_hcr;
|
||||
u64 eqx_c;
|
||||
u64 eqx_herr;
|
||||
u64 eqx_aer;
|
||||
u64 eqx_ptp;
|
||||
u64 eqx_tp;
|
||||
u64 eqx_ssba;
|
||||
u64 eqx_psba;
|
||||
u64 eqx_cec;
|
||||
u64 eqx_meql;
|
||||
u64 eqx_xisbi;
|
||||
u64 eqx_xisc;
|
||||
u64 eqx_it;
|
||||
};
|
||||
|
||||
/*
|
||||
* These access functions will be changed when the dissuccsion about
|
||||
* the new access methods for POWER has settled.
|
||||
*/
|
||||
|
||||
static inline u64 epa_load(struct h_epa epa, u32 offset)
|
||||
{
|
||||
return __raw_readq((void __iomem *)(epa.addr + offset));
|
||||
}
|
||||
|
||||
static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
|
||||
{
|
||||
__raw_writeq(value, (void __iomem *)(epa.addr + offset));
|
||||
epa_load(epa, offset); /* synchronize explicitly to eHEA */
|
||||
}
|
||||
|
||||
static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
|
||||
{
|
||||
__raw_writeq(value, (void __iomem *)(epa.addr + offset));
|
||||
}
|
||||
|
||||
/*
 * Typed wrappers around epa_load()/epa_store(): each pair translates a
 * member name of the corresponding memory-mapped control block (EQ, CQ,
 * QP, QP extended data, MR/MW, HCA global) into its register offset via
 * the matching *_OFFSET() helper.
 */
#define epa_store_eq(epa, offset, value)\
	epa_store(epa, EQTEMM_OFFSET(offset), value)
#define epa_load_eq(epa, offset)\
	epa_load(epa, EQTEMM_OFFSET(offset))

#define epa_store_cq(epa, offset, value)\
	epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
	epa_load(epa, CQTEMM_OFFSET(offset))

#define epa_store_qp(epa, offset, value)\
	epa_store(epa, QPTEMM_OFFSET(offset), value)
#define epa_load_qp(epa, offset)\
	epa_load(epa, QPTEMM_OFFSET(offset))

#define epa_store_qped(epa, offset, value)\
	epa_store(epa, QPEDMM_OFFSET(offset), value)
#define epa_load_qped(epa, offset)\
	epa_load(epa, QPEDMM_OFFSET(offset))

#define epa_store_mrmw(epa, offset, value)\
	epa_store(epa, MRMWMM_OFFSET(offset), value)
#define epa_load_mrmw(epa, offset)\
	epa_load(epa, MRMWMM_OFFSET(offset))

#define epa_store_base(epa, offset, value)\
	epa_store(epa, HCAGR_OFFSET(offset), value)
#define epa_load_base(epa, offset)\
	epa_load(epa, HCAGR_OFFSET(offset))
|
||||
|
||||
/* Post @nr_wqes to the QP's qpx_sqa adder register. */
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
	u64 adder = EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes);

	epa_store_acc(qp->epas.kernel, QPTEMM_OFFSET(qpx_sqa), adder);
}
|
||||
|
||||
/*
 * Post @nr_wqes to the QP's qpx_rq3a adder register.
 *
 * Fix: the value was built with QPX_RQ1A_VALUE while ehea_update_rq1a()
 * used QPX_RQ3A_VALUE — an apparent copy/paste swap.  Use the mask that
 * matches the register written here.  NOTE(review): assuming all
 * QPX_RQ*A_VALUE masks cover the same bit range (confirm in ehea_hw.h
 * defines), this is a consistency fix with no behavior change.
 */
static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;

	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
		      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}
|
||||
|
||||
/* Post @nr_wqes to the QP's qpx_rq2a adder register. */
static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
	u64 adder = EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes);

	epa_store_acc(qp->epas.kernel, QPTEMM_OFFSET(qpx_rq2a), adder);
}
|
||||
|
||||
/*
 * Post @nr_wqes to the QP's qpx_rq1a adder register.
 *
 * Fix: the value was built with QPX_RQ3A_VALUE while ehea_update_rq3a()
 * used QPX_RQ1A_VALUE — an apparent copy/paste swap.  Use the mask that
 * matches the register written here.  NOTE(review): assuming all
 * QPX_RQ*A_VALUE masks cover the same bit range (confirm in ehea_hw.h
 * defines), this is a consistency fix with no behavior change.
 */
static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;

	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
		      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}
|
||||
|
||||
/* Add @nr_cqes to the CQ's cqx_feca adder register. */
static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
{
	u64 adder = EHEA_BMASK_SET(CQX_FECADDER, nr_cqes);

	epa_store_acc(cq->epas.kernel, CQTEMM_OFFSET(cqx_feca), adder);
}
|
||||
|
||||
/*
 * Write the CQ's cqx_n1 register with the "generate completion event"
 * bit set (re-arms CQ event notification; uses the synchronizing
 * epa_store_cq() variant, not a posted write).
 */
static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_cq(epa, cqx_n1,
		     EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
}
|
||||
|
||||
static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
|
||||
{
|
||||
struct h_epa epa = my_cq->epas.kernel;
|
||||
epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
|
||||
EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
|
||||
}
|
||||
|
||||
#endif /* __EHEA_HW_H__ */
|
3768
drivers/net/ethernet/ibm/ehea/ehea_main.c
Normal file
3768
drivers/net/ethernet/ibm/ehea/ehea_main.c
Normal file
文件差異過大導致無法顯示
Load Diff
626
drivers/net/ethernet/ibm/ehea/ehea_phyp.c
Normal file
626
drivers/net/ethernet/ibm/ehea/ehea_phyp.c
Normal file
@@ -0,0 +1,626 @@
|
||||
/*
|
||||
* linux/drivers/net/ehea/ehea_phyp.c
|
||||
*
|
||||
* eHEA ethernet device driver for IBM eServer System p
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2006
|
||||
*
|
||||
* Authors:
|
||||
* Christoph Raisch <raisch@de.ibm.com>
|
||||
* Jan-Bernd Themann <themann@de.ibm.com>
|
||||
* Thomas Klein <tklein@de.ibm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include "ehea_phyp.h"
|
||||
|
||||
|
||||
/*
 * Encode a queue size for the hypervisor: returns the smallest ld
 * ("logarithmus dualis") such that (2^(ld+1) - 1) >= queue_entries,
 * minus the implicit offset of one used by the hcall interface.
 */
static inline u16 get_order_of_qentries(u16 queue_entries)
{
	u8 ld;

	for (ld = 1; ((1U << ld) - 1) < queue_entries; ld++)
		;
	return ld - 1;
}
|
||||
|
||||
/* Defines for H_CALL H_ALLOC_RESOURCE */
|
||||
#define H_ALL_RES_TYPE_QP 1
|
||||
#define H_ALL_RES_TYPE_CQ 2
|
||||
#define H_ALL_RES_TYPE_EQ 3
|
||||
#define H_ALL_RES_TYPE_MR 5
|
||||
#define H_ALL_RES_TYPE_MW 6
|
||||
|
||||
/*
 * Issue a hypervisor call with no output registers, retrying up to 5
 * times while the hypervisor reports a "long busy" condition; between
 * attempts we sleep for the interval suggested by the return code.
 *
 * Returns the hcall return code, or H_BUSY if all retries were consumed.
 * Failures (ret < H_SUCCESS) are logged with all input arguments.
 */
static long ehea_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			pr_err("opcode=%lx ret=%lx"
			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
			       " arg5=%lx arg6=%lx arg7=%lx\n",
			       opcode, ret,
			       arg1, arg2, arg3, arg4, arg5, arg6, arg7);

		return ret;
	}

	return H_BUSY;
}
|
||||
|
||||
/*
 * Issue a hypervisor call returning up to 9 output registers in @outs,
 * with the same long-busy retry policy as ehea_plpar_hcall_norets().
 *
 * Error logging is suppressed for one expected case: H_AUTHORITY from
 * H_MODIFY_HEA_PORT when modifying the CB4 jumbo/speed attributes or
 * the CB7 default-UC-QPN attribute (arg2 carries the port control-block
 * category, arg3 the selection mask for those calls).
 *
 * Returns the hcall return code, or H_BUSY if all retries were consumed.
 */
static long ehea_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	u8 cb_cat;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);

		if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
		    && (opcode == H_MODIFY_HEA_PORT))
		    && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
		    || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
		    && (arg3 == H_PORT_CB7_DUCQPN)))))
			pr_err("opcode=%lx ret=%lx"
			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
			       " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
			       " arg9=%lx"
			       " out1=%lx out2=%lx out3=%lx out4=%lx"
			       " out5=%lx out6=%lx out7=%lx out8=%lx"
			       " out9=%lx\n",
			       opcode, ret,
			       arg1, arg2, arg3, arg4, arg5,
			       arg6, arg7, arg8, arg9,
			       outs[0], outs[1], outs[2], outs[3], outs[4],
			       outs[5], outs[6], outs[7], outs[8]);
		return ret;
	}

	return H_BUSY;
}
|
||||
|
||||
/*
 * Query QP attributes selected by @sel_mask into the control block at
 * @cb_addr (translated to an absolute address for the hypervisor).
 * Returns the hcall return code.
 */
u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
			 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
	return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
				       adapter_handle,		/* R4 */
				       qp_category,		/* R5 */
				       qp_handle,		/* R6 */
				       sel_mask,		/* R7 */
				       virt_to_abs(cb_addr),	/* R8 */
				       0, 0);			/* R9-R10 */
}
|
||||
|
||||
/* input param R5 */
|
||||
#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
|
||||
#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
|
||||
#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
|
||||
#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
|
||||
#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
|
||||
#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
|
||||
#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
|
||||
#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
|
||||
#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
|
||||
#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
|
||||
|
||||
/* input param R9 */
|
||||
#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
|
||||
#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/* input param R10 */
|
||||
#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
|
||||
#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
|
||||
#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
|
||||
#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
|
||||
/* Max Send Scatter Gather Elements */
|
||||
#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
|
||||
#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
|
||||
/* Max Receive SG Elements RQ1 */
|
||||
#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
|
||||
#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
|
||||
|
||||
/* input param R11 */
|
||||
#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
|
||||
/* max swqe immediate data length */
|
||||
#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
|
||||
|
||||
/* input param R12 */
|
||||
#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
|
||||
/* Threshold RQ2 */
|
||||
#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
|
||||
/* Threshold RQ3 */
|
||||
|
||||
/* output param R6 */
|
||||
#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
|
||||
#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
|
||||
#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
|
||||
#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
|
||||
|
||||
/* output param, R7 */
|
||||
#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
|
||||
#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
|
||||
#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
|
||||
#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
|
||||
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
|
||||
|
||||
/* output param R8,R9 */
|
||||
#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
|
||||
#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
|
||||
#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
|
||||
#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/* output param R11,R12 */
|
||||
#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
|
||||
#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
|
||||
#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
|
||||
#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/*
 * Allocate a Queue Pair (send queue plus receive queues RQ1-RQ3) via
 * H_ALLOC_HEA_RESOURCE.
 *
 * The requested queue sizes in @init_attr are encoded as orders of two
 * for the hypervisor; the actually granted counts, queue page numbers
 * and LIOBNs are decoded from the output registers back into @init_attr.
 * On success (hret == 0) the returned register page address is mapped
 * via hcp_epas_ctor() into @h_epas.
 *
 * Returns the hcall return code.
 */
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
			     struct ehea_qp_init_attr *init_attr, const u32 pd,
			     u64 *qp_handle, struct h_epas *h_epas)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	/* R5: resource type "QP" plus queue configuration flags */
	u64 allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6)	/* rq1 & rq2 & rq3 */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0)	/* EQE gen. disabled */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);

	/* R9: protection domain and caller's QP token */
	u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);

	/* R10: requested WQE counts (as orders) and SG element encodings */
	u64 max_r10_reg =
	    EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
			   get_order_of_qentries(init_attr->max_nr_send_wqes))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
			     init_attr->wqe_size_enc_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
			     init_attr->wqe_size_enc_rq2)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
			     init_attr->wqe_size_enc_rq3);

	/* R11: immediate data length and port number */
	u64 r11_in =
	    EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
	/* R12: RQ2/RQ3 packet-size thresholds */
	u64 threshold =
	    EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 init_attr->send_cq_handle,	/* R6 */
				 init_attr->recv_cq_handle,	/* R7 */
				 init_attr->aff_eq_handle,	/* R8 */
				 r9_reg,			/* R9 */
				 max_r10_reg,			/* R10 */
				 r11_in,			/* R11 */
				 threshold);			/* R12 */

	/* Decode granted resources from the output registers. */
	*qp_handle = outs[0];
	init_attr->qp_nr = (u32)outs[1];

	init_attr->act_nr_send_wqes =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
	init_attr->act_nr_rwqes_rq1 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
	init_attr->act_nr_rwqes_rq2 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
	init_attr->act_nr_rwqes_rq3 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);

	/* WQE size encodings are taken over unchanged from the request. */
	init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
	init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
	init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
	init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;

	init_attr->nr_sq_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
	init_attr->nr_rq1_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
	init_attr->nr_rq2_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
	init_attr->nr_rq3_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);

	init_attr->liobn_sq =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
	init_attr->liobn_rq1 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
	init_attr->liobn_rq2 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
	init_attr->liobn_rq3 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);

	if (!hret)
		hcp_epas_ctor(h_epas, outs[6], outs[6]);

	return hret;
}
|
||||
|
||||
/*
 * Allocate a Completion Queue via H_ALLOC_HEA_RESOURCE.  The granted
 * CQE count and page count are written back into @cq_attr; on success
 * the register page addresses are mapped into @epas.
 * Returns the hcall return code.
 */
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
			     struct ehea_cq_attr *cq_attr,
			     u64 *cq_handle, struct h_epas *epas)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 H_ALL_RES_TYPE_CQ,		/* R5 */
				 cq_attr->eq_handle,		/* R6 */
				 cq_attr->cq_token,		/* R7 */
				 cq_attr->max_nr_of_cqes,	/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*cq_handle = outs[0];
	cq_attr->act_nr_of_cqes = outs[3];
	cq_attr->nr_pages = outs[4];

	if (!hret)
		hcp_epas_ctor(epas, outs[5], outs[6]);

	return hret;
}
|
||||
|
||||
/* Defines for H_CALL H_ALLOC_RESOURCE */
|
||||
#define H_ALL_RES_TYPE_QP 1
|
||||
#define H_ALL_RES_TYPE_CQ 2
|
||||
#define H_ALL_RES_TYPE_EQ 3
|
||||
#define H_ALL_RES_TYPE_MR 5
|
||||
#define H_ALL_RES_TYPE_MW 6
|
||||
|
||||
/* input param R5 */
|
||||
#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
|
||||
#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
|
||||
#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
|
||||
#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
|
||||
/* input param R6 */
|
||||
#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/* output param R6 */
|
||||
#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/* output param R7 */
|
||||
#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/* output param R8 */
|
||||
#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
|
||||
|
||||
/* output param R9 */
|
||||
#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
|
||||
#define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63)
|
||||
|
||||
/* output param R10 */
|
||||
#define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63)
|
||||
|
||||
/* output param R11 */
|
||||
#define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63)
|
||||
|
||||
/* output param R12 */
|
||||
#define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63)
|
||||
|
||||
/*
 * Allocate an Event Queue (or Notification EQ, depending on
 * eq_attr->type) via H_ALLOC_HEA_RESOURCE.  The granted EQE count,
 * page count and the four interrupt source tokens are written back
 * into @eq_attr.  Returns the hcall return code.
 */
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
	u64 hret, allocate_controls;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	/* resource type */
	allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 eq_attr->max_nr_of_eqes,	/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */

	*eq_handle = outs[0];
	eq_attr->act_nr_of_eqes = outs[3];
	eq_attr->nr_pages = outs[4];
	eq_attr->ist1 = outs[5];
	eq_attr->ist2 = outs[6];
	eq_attr->ist3 = outs[7];
	eq_attr->ist4 = outs[8];

	return hret;
}
|
||||
|
||||
/*
 * Modify QP attributes selected by @sel_mask from the control block at
 * @cb_addr.  On return, @inv_attr_id names the first invalid attribute
 * (if any), @proc_mask the attributes actually processed, and
 * @out_swr/@out_rwr the resulting send/receive wrap counts.
 * Returns the hcall return code.
 */
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
			  const u64 qp_handle, const u64 sel_mask,
			  void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
			  u16 *out_swr, u16 *out_rwr)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
				 outs,
				 adapter_handle,		/* R4 */
				 (u64) cat,			/* R5 */
				 qp_handle,			/* R6 */
				 sel_mask,			/* R7 */
				 virt_to_abs(cb_addr),		/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*inv_attr_id = outs[0];
	*out_swr = outs[3];
	*out_rwr = outs[4];
	*proc_mask = outs[5];

	return hret;
}
|
||||
|
||||
/*
 * Register @count resource pages (starting at @log_pageaddr) with the
 * adapter for the queue identified by @queue_type/@resource_handle.
 * Returns the hcall return code.
 */
u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
			  const u8 queue_type, const u64 resource_handle,
			  const u64 log_pageaddr, u64 count)
{
	u64 reg_control;

	reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
		    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);

	return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
				       adapter_handle,		/* R4 */
				       reg_control,		/* R5 */
				       resource_handle,		/* R6 */
				       log_pageaddr,		/* R7 */
				       count,			/* R8 */
				       0, 0);			/* R9-R10 */
}
|
||||
|
||||
/*
 * Register a shared memory region derived from @orig_mr_handle.  On
 * success the new handle and local key are stored into @mr.
 * Returns the hcall return code.
 */
u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
			const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
			struct ehea_mr *mr)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
				 outs,
				 adapter_handle	      ,		 /* R4 */
				 orig_mr_handle,		 /* R5 */
				 vaddr_in,			 /* R6 */
				 (((u64)access_ctrl) << 32ULL),	 /* R7 */
				 pd,				 /* R8 */
				 0, 0, 0, 0);			 /* R9-R12 */

	mr->handle = outs[0];
	mr->lkey = (u32)outs[2];

	return hret;
}
|
||||
|
||||
/*
 * Disable the given QP via H_DISABLE_AND_GET_HEA.  The hcall's output
 * registers are discarded; only the return code is of interest here.
 */
u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
				 outs,
				 adapter_handle,		/* R4 */
				 H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
				 qp_handle,			/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */
}
|
||||
|
||||
/*
 * Release the adapter resource behind @res_handle; @force_bit requests
 * forced deallocation.  Returns the hcall return code.
 */
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
			 u64 force_bit)
{
	return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle,	   /* R4 */
				       res_handle,	   /* R5 */
				       force_bit,	   /* R6 */
				       0, 0, 0, 0);	   /* R7-R10 */
}
|
||||
|
||||
/*
 * Allocate a memory region of @length bytes at @vaddr via
 * H_ALLOC_HEA_RESOURCE; returns the new handle in @mr_handle and the
 * local key in @lkey.  Returns the hcall return code.
 *
 * Fix: use the H_ALL_RES_TYPE_MR macro (defined earlier in this file
 * with value 5) instead of the magic number 5 for the R5 resource-type
 * argument, matching the CQ/EQ allocation paths.
 */
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
			     const u64 length, const u32 access_ctrl,
			     const u32 pd, u64 *mr_handle, u32 *lkey)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		   /* R4 */
				 H_ALL_RES_TYPE_MR,		   /* R5 */
				 vaddr,				   /* R6 */
				 length,			   /* R7 */
				 (((u64) access_ctrl) << 32ULL),   /* R8 */
				 pd,				   /* R9 */
				 0, 0, 0);			   /* R10-R12 */

	*mr_handle = outs[0];
	*lkey = (u32)outs[2];
	return hret;
}
|
||||
|
||||
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
|
||||
const u8 pagesize, const u8 queue_type,
|
||||
const u64 log_pageaddr, const u64 count)
|
||||
{
|
||||
if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
|
||||
pr_err("not on pageboundary\n");
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
return ehea_h_register_rpage(adapter_handle, pagesize,
|
||||
queue_type, mr_handle,
|
||||
log_pageaddr, count);
|
||||
}
|
||||
|
||||
/*
 * Query global adapter attributes into the control block at @cb_addr
 * (expected to be a struct hcp_query_ehea).  Returns the hcall return
 * code; with DEBUG builds the returned block is dumped.
 */
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
	u64 hret, cb_logaddr;

	cb_logaddr = virt_to_abs(cb_addr);

	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
				       adapter_handle,		/* R4 */
				       cb_logaddr,		/* R5 */
				       0, 0, 0, 0, 0);		/* R6-R10 */
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
	return hret;
}
|
||||
|
||||
/*
 * Query port attributes: control-block category @cb_cat and attribute
 * @select_mask are read into @cb_addr for the given port.
 * Returns the hcall return code.
 */
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
			   const u8 cb_cat, const u64 select_mask,
			   void *cb_addr)
{
	u64 port_info;
	u64 cb_logaddr = virt_to_abs(cb_addr);
	u64 arr_index = 0;

	/* R5 combines control-block category and port number */
	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);

	return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
				       adapter_handle,		/* R4 */
				       port_info,		/* R5 */
				       select_mask,		/* R6 */
				       arr_index,		/* R7 */
				       cb_logaddr,		/* R8 */
				       0, 0);			/* R9-R10 */
}
|
||||
|
||||
/*
 * Modify port attributes from the control block at @cb_addr.  The R5
 * argument combines control-block category and port number; the hcall's
 * output registers are not used by callers.  Returns the hcall return
 * code (H_AUTHORITY for CB4/CB7 attributes is quietly tolerated by
 * ehea_plpar_hcall9()'s logging).
 */
u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
			    const u8 cb_cat, const u64 select_mask,
			    void *cb_addr)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 port_info;
	u64 arr_index = 0;
	u64 cb_logaddr = virt_to_abs(cb_addr);

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
	return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
				 outs,
				 adapter_handle,		/* R4 */
				 port_info,			/* R5 */
				 select_mask,			/* R6 */
				 arr_index,			/* R7 */
				 cb_logaddr,			/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */
}
|
||||
|
||||
/*
 * Register or deregister a broadcast/multicast MAC filter entry; the
 * direction is selected by @hcall_id (register vs. deregister hcall).
 * The MAC is shifted right by 16 bits to match the register layout.
 * Returns the hcall return code.
 */
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
			  const u8 reg_type, const u64 mc_mac_addr,
			  const u16 vlan_id, const u32 hcall_id)
{
	u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
	u64 mac_addr = mc_mac_addr >> 16;

	r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
	r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
	r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
	r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);

	return ehea_plpar_hcall_norets(hcall_id,
				       adapter_handle,		/* R4 */
				       r5_port_num,		/* R5 */
				       r6_reg_type,		/* R6 */
				       r7_mc_mac_addr,		/* R7 */
				       r8_vlan_id,		/* R8 */
				       0, 0);			/* R9-R10 */
}
|
||||
|
||||
/*
 * Re-arm the events selected by @event_mask on the notification EQ.
 * Returns the hcall return code.
 */
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
			const u64 event_mask)
{
	return ehea_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle,		/* R4 */
				       neq_handle,		/* R5 */
				       event_mask,		/* R6 */
				       0, 0, 0, 0);		/* R7-R10 */
}
|
||||
|
||||
/*
 * Fetch error data for the resource behind @ressource_handle (sic) into
 * @rblock.  Returns the hcall return code.
 */
u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
		      void *rblock)
{
	return ehea_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle,		/* R4 */
				       ressource_handle,	/* R5 */
				       virt_to_abs(rblock),	/* R6 */
				       0, 0, 0, 0);		/* R7-R10 */
}
|
467
drivers/net/ethernet/ibm/ehea/ehea_phyp.h
Normal file
467
drivers/net/ethernet/ibm/ehea/ehea_phyp.h
Normal file
@@ -0,0 +1,467 @@
|
||||
/*
|
||||
* linux/drivers/net/ehea/ehea_phyp.h
|
||||
*
|
||||
* eHEA ethernet device driver for IBM eServer System p
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2006
|
||||
*
|
||||
* Authors:
|
||||
* Christoph Raisch <raisch@de.ibm.com>
|
||||
* Jan-Bernd Themann <themann@de.ibm.com>
|
||||
* Thomas Klein <tklein@de.ibm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#ifndef __EHEA_PHYP_H__
|
||||
#define __EHEA_PHYP_H__
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <asm/hvcall.h>
|
||||
#include "ehea.h"
|
||||
#include "ehea_hw.h"
|
||||
|
||||
/* Some abbreviations used here:
|
||||
*
|
||||
* hcp_* - structures, variables and functions releated to Hypervisor Calls
|
||||
*/
|
||||
|
||||
/*
 * Translate an H_LONG_BUSY_* hcall return code into the number of
 * milliseconds the hypervisor suggests we wait before retrying.
 * Unknown codes fall back to 1 ms.
 */
static inline u32 get_longbusy_msecs(int long_busy_ret_code)
{
	switch (long_busy_ret_code) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}
|
||||
|
||||
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
|
||||
#define EHEA_MAX_RPAGE 512
|
||||
|
||||
/* Notification Event Queue (NEQ) Entry bit masks */
|
||||
#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
|
||||
#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
|
||||
#define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16)
|
||||
#define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17)
|
||||
#define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18)
|
||||
#define NEQE_PLID EHEA_BMASK_IBM(16, 47)
|
||||
|
||||
/* Notification Event Codes */
|
||||
#define EHEA_EC_PORTSTATE_CHG 0x30
|
||||
#define EHEA_EC_ADAPTER_MALFUNC 0x32
|
||||
#define EHEA_EC_PORT_MALFUNC 0x33
|
||||
|
||||
/* Notification Event Log Register (NELR) bit masks */
|
||||
#define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61)
|
||||
#define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62)
|
||||
#define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63)
|
||||
|
||||
/*
 * Map the kernel-side register page returned by the hypervisor and
 * record the user-side physical address as-is.  The mapping is done on
 * a page-aligned base with the sub-page offset re-applied afterwards.
 */
static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
				 u64 paddr_user)
{
	/* To support 64k pages we must round to 64k page boundary */
	epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
			    (paddr_kernel & ~PAGE_MASK);
	epas->user.addr = paddr_user;
}
|
||||
|
||||
/*
 * Undo hcp_epas_ctor(): unmap the kernel-side register page (using the
 * page-aligned base address actually passed to ioremap()) and clear
 * both addresses so a repeated dtor call is harmless.
 */
static inline void hcp_epas_dtor(struct h_epas *epas)
{
	if (epas->kernel.addr)
		iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));

	epas->user.addr = 0;
	epas->kernel.addr = 0;
}
|
||||
|
||||
struct hcp_modify_qp_cb0 {
|
||||
u64 qp_ctl_reg; /* 00 */
|
||||
u32 max_swqe; /* 02 */
|
||||
u32 max_rwqe; /* 03 */
|
||||
u32 port_nb; /* 04 */
|
||||
u32 reserved0; /* 05 */
|
||||
u64 qp_aer; /* 06 */
|
||||
u64 qp_tenure; /* 08 */
|
||||
};
|
||||
|
||||
/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
|
||||
#define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5)
|
||||
#define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0)
|
||||
#define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1)
|
||||
#define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2)
|
||||
#define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3)
|
||||
#define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4)
|
||||
#define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5)
|
||||
|
||||
/* Queue Pair Control Register Status Bits */
|
||||
#define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */
|
||||
/* QP States: */
|
||||
#define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */
|
||||
#define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */
|
||||
#define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
|
||||
#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
|
||||
#define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
|
||||
#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */
|
||||
|
||||
struct hcp_modify_qp_cb1 {
|
||||
u32 qpn; /* 00 */
|
||||
u32 qp_asyn_ev_eq_nb; /* 01 */
|
||||
u64 sq_cq_handle; /* 02 */
|
||||
u64 rq_cq_handle; /* 04 */
|
||||
/* sgel = scatter gather element */
|
||||
u32 sgel_nb_sq; /* 06 */
|
||||
u32 sgel_nb_rq1; /* 07 */
|
||||
u32 sgel_nb_rq2; /* 08 */
|
||||
u32 sgel_nb_rq3; /* 09 */
|
||||
};
|
||||
|
||||
/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
|
||||
#define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7)
|
||||
#define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0)
|
||||
#define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1)
|
||||
#define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2)
|
||||
#define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3)
|
||||
#define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4)
|
||||
#define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5)
|
||||
#define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6)
|
||||
#define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7)
|
||||
|
||||
struct hcp_query_ehea {
|
||||
u32 cur_num_qps; /* 00 */
|
||||
u32 cur_num_cqs; /* 01 */
|
||||
u32 cur_num_eqs; /* 02 */
|
||||
u32 cur_num_mrs; /* 03 */
|
||||
u32 auth_level; /* 04 */
|
||||
u32 max_num_qps; /* 05 */
|
||||
u32 max_num_cqs; /* 06 */
|
||||
u32 max_num_eqs; /* 07 */
|
||||
u32 max_num_mrs; /* 08 */
|
||||
u32 reserved0; /* 09 */
|
||||
u32 int_clock_freq; /* 10 */
|
||||
u32 max_num_pds; /* 11 */
|
||||
u32 max_num_addr_handles; /* 12 */
|
||||
u32 max_num_cqes; /* 13 */
|
||||
u32 max_num_wqes; /* 14 */
|
||||
u32 max_num_sgel_rq1wqe; /* 15 */
|
||||
u32 max_num_sgel_rq2wqe; /* 16 */
|
||||
u32 max_num_sgel_rq3wqe; /* 17 */
|
||||
u32 mr_page_size; /* 18 */
|
||||
u32 reserved1; /* 19 */
|
||||
u64 max_mr_size; /* 20 */
|
||||
u64 reserved2; /* 22 */
|
||||
u32 num_ports; /* 24 */
|
||||
u32 reserved3; /* 25 */
|
||||
u32 reserved4; /* 26 */
|
||||
u32 reserved5; /* 27 */
|
||||
u64 max_mc_mac; /* 28 */
|
||||
u64 ehea_cap; /* 30 */
|
||||
u32 max_isn_per_eq; /* 32 */
|
||||
u32 max_num_neq; /* 33 */
|
||||
u64 max_num_vlan_ids; /* 34 */
|
||||
u32 max_num_port_group; /* 36 */
|
||||
u32 max_num_phys_port; /* 37 */
|
||||
|
||||
};
|
||||
|
||||
/* Hcall Query/Modify Port Control Block defines */
|
||||
#define H_PORT_CB0 0
|
||||
#define H_PORT_CB1 1
|
||||
#define H_PORT_CB2 2
|
||||
#define H_PORT_CB3 3
|
||||
#define H_PORT_CB4 4
|
||||
#define H_PORT_CB5 5
|
||||
#define H_PORT_CB6 6
|
||||
#define H_PORT_CB7 7
|
||||
|
||||
struct hcp_ehea_port_cb0 {
|
||||
u64 port_mac_addr;
|
||||
u64 port_rc;
|
||||
u64 reserved0;
|
||||
u32 port_op_state;
|
||||
u32 port_speed;
|
||||
u32 ext_swport_op_state;
|
||||
u32 neg_tpf_prpf;
|
||||
u32 num_default_qps;
|
||||
u32 reserved1;
|
||||
u64 default_qpn_arr[16];
|
||||
};
|
||||
|
||||
/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
|
||||
#define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */
|
||||
#define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */
|
||||
#define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */
|
||||
#define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */
|
||||
|
||||
/* Hcall Query Port: Returned port speed values */
|
||||
#define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */
|
||||
#define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */
|
||||
#define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */
|
||||
#define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */
|
||||
#define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */
|
||||
#define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */
|
||||
|
||||
/* Port Receive Control Status Bits */
|
||||
#define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49)
|
||||
#define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50)
|
||||
#define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51)
|
||||
#define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52)
|
||||
#define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53)
|
||||
#define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54)
|
||||
#define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55)
|
||||
#define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56)
|
||||
#define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57)
|
||||
#define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58)
|
||||
#define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59)
|
||||
#define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60)
|
||||
#define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61)
|
||||
#define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63)
|
||||
|
||||
#define PXLY_RC_VLAN_FILTER 2
|
||||
#define PXLY_RC_VLAN_PERM 0
|
||||
|
||||
|
||||
#define H_PORT_CB1_ALL 0x8000000000000000ULL
|
||||
|
||||
/* Port control block 1: VLAN filter table.
 * 64 * 64 = 4096 bits — presumably one bit per VLAN ID; verify against
 * the eHEA hcall specification.
 */
struct hcp_ehea_port_cb1 {
	u64 vlan_filter[64];
};
|
||||
|
||||
#define H_PORT_CB2_ALL 0xFFE0000000000000ULL
|
||||
|
||||
/* Port control block 2: per-port rx/tx counters.
 * NOTE(review): field meanings inferred from names only (e.g. rxo/txo
 * look like octet counts, *ucp/*mcp/*bcp like unicast/multicast/
 * broadcast packets) — confirm against the eHEA specification.
 */
struct hcp_ehea_port_cb2 {
	u64 rxo;
	u64 rxucp;
	u64 rxufd;
	u64 rxuerr;
	u64 rxftl;
	u64 rxmcp;
	u64 rxbcp;
	u64 txo;
	u64 txucp;
	u64 txmcp;
	u64 txbcp;
};
|
||||
|
||||
/* Port control block 3: broadcast/multicast/untagged VLAN filter tables
 * and the port MAC hash array (each table is a 4096-bit bitmap).
 */
struct hcp_ehea_port_cb3 {
	u64 vlan_bc_filter[64];
	u64 vlan_mc_filter[64];
	u64 vlan_un_filter[64];
	u64 port_mac_hash_array[64];
};
|
||||
|
||||
#define H_PORT_CB4_ALL 0xF000000000000000ULL
|
||||
#define H_PORT_CB4_JUMBO 0x1000000000000000ULL
|
||||
#define H_PORT_CB4_SPEED 0x8000000000000000ULL
|
||||
|
||||
/* Port control block 4: speed and frame settings.
 * port_speed is selected by H_PORT_CB4_SPEED, jumbo_frame by
 * H_PORT_CB4_JUMBO (see the selection masks above).
 */
struct hcp_ehea_port_cb4 {
	u32 port_speed;		/* H_SPEED_* value */
	u32 pause_frame;
	u32 ens_port_op_state;
	u32 jumbo_frame;	/* jumbo frame enable */
	u32 ens_port_wrap;
};
|
||||
|
||||
/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
|
||||
#define H_PORT_CB5_RCU 0x0001000000000000ULL
|
||||
#define PXS_RCU EHEA_BMASK_IBM(61, 63)
|
||||
|
||||
/* Port control block 5: low-level port register image.
 * Only rcu (index 15) is referenced by the selection mask above
 * (H_PORT_CB5_RCU / PXS_RCU); the remaining fields are opaque
 * hardware registers — semantics per the eHEA specification.
 */
struct hcp_ehea_port_cb5 {
	u64 prc;		/* 00 */
	u64 uaa;		/* 01 */
	u64 macvc;		/* 02 */
	u64 xpcsc;		/* 03 */
	u64 xpcsp;		/* 04 */
	u64 pcsid;		/* 05 */
	u64 xpcsst;		/* 06 */
	u64 pthlb;		/* 07 */
	u64 pthrb;		/* 08 */
	u64 pqu;		/* 09 */
	u64 pqd;		/* 10 */
	u64 prt;		/* 11 */
	u64 wsth;		/* 12 */
	u64 rcb;		/* 13 */
	u64 rcm;		/* 14 */
	u64 rcu;		/* 15 */
	u64 macc;		/* 16 */
	u64 pc;			/* 17 */
	u64 pst;		/* 18 */
	u64 ducqpn;		/* 19 */
	u64 mcqpn;		/* 20 */
	u64 mma;		/* 21 */
	u64 pmc0h;		/* 22 */
	u64 pmc0l;		/* 23 */
	u64 lbc;		/* 24 */
};
|
||||
|
||||
#define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL
|
||||
|
||||
/* Port control block 6: extended rx/tx statistics.
 * NOTE(review): names like rx64/rx128/.../rx1024 suggest a frame-size
 * histogram and *err/*col fields suggest error/collision counters —
 * confirm exact semantics against the eHEA specification.
 */
struct hcp_ehea_port_cb6 {
	u64 rxo;		/* 00 */
	u64 rx64;		/* 01 */
	u64 rx65;		/* 02 */
	u64 rx128;		/* 03 */
	u64 rx256;		/* 04 */
	u64 rx512;		/* 05 */
	u64 rx1024;		/* 06 */
	u64 rxbfcs;		/* 07 */
	u64 rxime;		/* 08 */
	u64 rxrle;		/* 09 */
	u64 rxorle;		/* 10 */
	u64 rxftl;		/* 11 */
	u64 rxjab;		/* 12 */
	u64 rxse;		/* 13 */
	u64 rxce;		/* 14 */
	u64 rxrf;		/* 15 */
	u64 rxfrag;		/* 16 */
	u64 rxuoc;		/* 17 */
	u64 rxcpf;		/* 18 */
	u64 rxsb;		/* 19 */
	u64 rxfd;		/* 20 */
	u64 rxoerr;		/* 21 */
	u64 rxaln;		/* 22 */
	u64 ducqpn;		/* 23 */
	u64 reserved0;		/* 24 */
	u64 rxmcp;		/* 25 */
	u64 rxbcp;		/* 26 */
	u64 txmcp;		/* 27 */
	u64 txbcp;		/* 28 */
	u64 txo;		/* 29 */
	u64 tx64;		/* 30 */
	u64 tx65;		/* 31 */
	u64 tx128;		/* 32 */
	u64 tx256;		/* 33 */
	u64 tx512;		/* 34 */
	u64 tx1024;		/* 35 */
	u64 txbfcs;		/* 36 */
	u64 txcpf;		/* 37 */
	u64 txlf;		/* 38 */
	u64 txrf;		/* 39 */
	u64 txime;		/* 40 */
	u64 txsc;		/* 41 */
	u64 txmc;		/* 42 */
	u64 txsqe;		/* 43 */
	u64 txdef;		/* 44 */
	u64 txlcol;		/* 45 */
	u64 txexcol;		/* 46 */
	u64 txcse;		/* 47 */
	u64 txbor;		/* 48 */
};
|
||||
|
||||
#define H_PORT_CB7_DUCQPN 0x8000000000000000ULL
|
||||
|
||||
/* Port control block 7: default unicast QP number
 * (selected by H_PORT_CB7_DUCQPN above).
 */
struct hcp_ehea_port_cb7 {
	u64 def_uc_qpn;
};
|
||||
|
||||
u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
|
||||
const u8 qp_category,
|
||||
const u64 qp_handle, const u64 sel_mask,
|
||||
void *cb_addr);
|
||||
|
||||
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
|
||||
const u8 cat,
|
||||
const u64 qp_handle,
|
||||
const u64 sel_mask,
|
||||
void *cb_addr,
|
||||
u64 *inv_attr_id,
|
||||
u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
|
||||
|
||||
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
|
||||
struct ehea_eq_attr *eq_attr, u64 *eq_handle);
|
||||
|
||||
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
|
||||
struct ehea_cq_attr *cq_attr,
|
||||
u64 *cq_handle, struct h_epas *epas);
|
||||
|
||||
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
|
||||
struct ehea_qp_init_attr *init_attr,
|
||||
const u32 pd,
|
||||
u64 *qp_handle, struct h_epas *h_epas);
|
||||
|
||||
#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
|
||||
#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
|
||||
|
||||
u64 ehea_h_register_rpage(const u64 adapter_handle,
|
||||
const u8 pagesize,
|
||||
const u8 queue_type,
|
||||
const u64 resource_handle,
|
||||
const u64 log_pageaddr, u64 count);
|
||||
|
||||
#define H_DISABLE_GET_EHEA_WQE_P 1
|
||||
#define H_DISABLE_GET_SQ_WQE_P 2
|
||||
#define H_DISABLE_GET_RQC 3
|
||||
|
||||
u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);
|
||||
|
||||
#define FORCE_FREE 1
|
||||
#define NORMAL_FREE 0
|
||||
|
||||
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
|
||||
u64 force_bit);
|
||||
|
||||
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
|
||||
const u64 length, const u32 access_ctrl,
|
||||
const u32 pd, u64 *mr_handle, u32 *lkey);
|
||||
|
||||
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
|
||||
const u8 pagesize, const u8 queue_type,
|
||||
const u64 log_pageaddr, const u64 count);
|
||||
|
||||
u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
|
||||
const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
|
||||
struct ehea_mr *mr);
|
||||
|
||||
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
|
||||
|
||||
/* output param R5 */
|
||||
#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
|
||||
#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
|
||||
|
||||
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
|
||||
const u8 cb_cat, const u64 select_mask,
|
||||
void *cb_addr);
|
||||
|
||||
u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
|
||||
const u8 cb_cat, const u64 select_mask,
|
||||
void *cb_addr);
|
||||
|
||||
#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
|
||||
#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
|
||||
#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
|
||||
#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
|
||||
|
||||
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
|
||||
const u8 reg_type, const u64 mc_mac_addr,
|
||||
const u16 vlan_id, const u32 hcall_id);
|
||||
|
||||
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
|
||||
const u64 event_mask);
|
||||
|
||||
u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
|
||||
void *rblock);
|
||||
|
||||
#endif /* __EHEA_PHYP_H__ */
|
1031
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
Normal file
1031
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
Normal file
The file diff is too large to display
Load Diff
404
drivers/net/ethernet/ibm/ehea/ehea_qmr.h
Normal file
404
drivers/net/ethernet/ibm/ehea/ehea_qmr.h
Normal file
@@ -0,0 +1,404 @@
|
||||
/*
|
||||
* linux/drivers/net/ehea/ehea_qmr.h
|
||||
*
|
||||
* eHEA ethernet device driver for IBM eServer System p
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2006
|
||||
*
|
||||
* Authors:
|
||||
* Christoph Raisch <raisch@de.ibm.com>
|
||||
* Jan-Bernd Themann <themann@de.ibm.com>
|
||||
* Thomas Klein <tklein@de.ibm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#ifndef __EHEA_QMR_H__
|
||||
#define __EHEA_QMR_H__
|
||||
|
||||
#include <linux/prefetch.h>
|
||||
#include "ehea.h"
|
||||
#include "ehea_hw.h"
|
||||
|
||||
/*
|
||||
* page size of ehea hardware queues
|
||||
*/
|
||||
|
||||
#define EHEA_PAGESHIFT 12
|
||||
#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
|
||||
#define EHEA_SECTSIZE (1UL << 24)
|
||||
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
|
||||
#define EHEA_HUGEPAGESHIFT 34
|
||||
#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
|
||||
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
|
||||
|
||||
#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
|
||||
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
|
||||
#endif
|
||||
|
||||
/* Some abbreviations used here:
|
||||
*
|
||||
* WQE - Work Queue Entry
|
||||
* SWQE - Send Work Queue Entry
|
||||
* RWQE - Receive Work Queue Entry
|
||||
* CQE - Completion Queue Entry
|
||||
* EQE - Event Queue Entry
|
||||
* MR - Memory Region
|
||||
*/
|
||||
|
||||
/* Use of WR_ID field for EHEA */
|
||||
#define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19)
|
||||
#define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23)
|
||||
#define EHEA_SWQE2_TYPE 0x1
|
||||
#define EHEA_SWQE3_TYPE 0x2
|
||||
#define EHEA_RWQE2_TYPE 0x3
|
||||
#define EHEA_RWQE3_TYPE 0x4
|
||||
#define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47)
|
||||
#define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63)
|
||||
|
||||
/* Scatter/gather entry used in send/receive WQEs: one data segment
 * described by virtual address, memory region L_Key and length.
 */
struct ehea_vsgentry {
	u64 vaddr;	/* virtual address of the data segment */
	u32 l_key;	/* L_Key of the memory region covering vaddr */
	u32 len;	/* segment length in bytes */
};
|
||||
|
||||
/* maximum number of sg entries allowed in a WQE */
|
||||
#define EHEA_MAX_WQE_SG_ENTRIES 252
|
||||
#define SWQE2_MAX_IMM (0xD0 - 0x30)
|
||||
#define SWQE3_MAX_IMM 224
|
||||
|
||||
/* tx control flags for swqe */
|
||||
#define EHEA_SWQE_CRC 0x8000
|
||||
#define EHEA_SWQE_IP_CHECKSUM 0x4000
|
||||
#define EHEA_SWQE_TCP_CHECKSUM 0x2000
|
||||
#define EHEA_SWQE_TSO 0x1000
|
||||
#define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800
|
||||
#define EHEA_SWQE_VLAN_INSERT 0x0400
|
||||
#define EHEA_SWQE_IMM_DATA_PRESENT 0x0200
|
||||
#define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100
|
||||
#define EHEA_SWQE_WRAP_CTL_REC 0x0080
|
||||
#define EHEA_SWQE_WRAP_CTL_FORCE 0x0040
|
||||
#define EHEA_SWQE_BIND 0x0020
|
||||
#define EHEA_SWQE_PURGE 0x0010
|
||||
|
||||
/* sizeof(struct ehea_swqe) less the union */
|
||||
#define SWQE_HEADER_SIZE 32
|
||||
|
||||
/*
 * Send Work Queue Entry.  The first SWQE_HEADER_SIZE bytes are common;
 * the trailing union selects one of three on-the-wire formats:
 * format 1 = descriptors only, format 2 = one descriptor plus immediate
 * data, format 3 = immediate data only.  tx_control carries the
 * EHEA_SWQE_* flags defined above.
 */
struct ehea_swqe {
	u64 wr_id;			/* caller cookie; EHEA_WR_ID_* layout */
	u16 tx_control;			/* EHEA_SWQE_* flag bits */
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 tcp_end;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/* Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/* Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/* Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};
|
||||
|
||||
/* Receive Work Queue Entry: a work request ID plus a scatter/gather
 * list of data_segments valid entries.
 */
struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;	/* number of valid entries in sg_list */
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};
|
||||
|
||||
#define EHEA_CQE_VLAN_TAG_XTRACT 0x0400
|
||||
|
||||
#define EHEA_CQE_TYPE_RQ 0x60
|
||||
#define EHEA_CQE_STAT_ERR_MASK 0x700F
|
||||
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
|
||||
#define EHEA_CQE_BLIND_CKSUM 0x8000
|
||||
#define EHEA_CQE_STAT_ERR_TCP 0x4000
|
||||
#define EHEA_CQE_STAT_ERR_IP 0x2000
|
||||
#define EHEA_CQE_STAT_ERR_CRC 0x1000
|
||||
|
||||
/* Defines which bad send cqe stati lead to a port reset */
|
||||
#define EHEA_CQE_STAT_RESET_MASK 0x0002
|
||||
|
||||
/* Completion Queue Entry.  The high bit of 'valid' is compared against
 * the queue's toggle state to detect entries written in the current
 * pass (see hw_qeit_get_inc_valid/hw_qeit_get_valid below).
 */
struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;		/* compared against EHEA_CQE_TYPE_RQ */
	u8 valid;		/* bit 7 = valid toggle */
	u16 status;		/* EHEA_CQE_STAT_* bits */
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
|
||||
|
||||
#define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0)
|
||||
#define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1)
|
||||
#define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7)
|
||||
#define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31)
|
||||
#define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63)
|
||||
#define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63)
|
||||
#define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63)
|
||||
#define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
|
||||
#define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63)
|
||||
#define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63)
|
||||
#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
|
||||
#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
|
||||
|
||||
#define EHEA_AER_RESTYPE_QP 0x8
|
||||
#define EHEA_AER_RESTYPE_CQ 0x4
|
||||
#define EHEA_AER_RESTYPE_EQ 0x3
|
||||
|
||||
/* Defines which affiliated errors lead to a port reset */
|
||||
#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
|
||||
#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
|
||||
|
||||
/* Event Queue Entry: a single raw 64-bit event word, decoded with the
 * EHEA_EQE_* bit masks defined above.
 */
struct ehea_eqe {
	u64 entry;
};
|
||||
|
||||
#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
|
||||
#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
|
||||
|
||||
/*
 * Translate a byte offset into a pointer to the corresponding queue
 * entry.  An offset one wrap beyond the end is folded back into range,
 * then split into a page index (upper bits) and an in-page offset
 * (lower EHEA_PAGESHIFT bits).
 *
 * Fix: the return expression had been mangled by an HTML-entity
 * corruption ("&current" -> "&curren;t") and lost the address-of
 * operator; restored to "&current_page->entries[...]".
 */
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}
|
||||
|
||||
static inline void *hw_qeit_get(struct hw_queue *queue)
|
||||
{
|
||||
return hw_qeit_calc(queue, queue->current_q_offset);
|
||||
}
|
||||
|
||||
static inline void hw_qeit_inc(struct hw_queue *queue)
|
||||
{
|
||||
queue->current_q_offset += queue->qe_size;
|
||||
if (queue->current_q_offset >= queue->queue_length) {
|
||||
queue->current_q_offset = 0;
|
||||
/* toggle the valid flag */
|
||||
queue->toggle_state = (~queue->toggle_state) & 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Fetch the current entry, then advance the iterator past it. */
static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *entry;

	entry = hw_qeit_get(queue);
	hw_qeit_inc(queue);

	return entry;
}
|
||||
|
||||
/*
 * Return the CQE at the current offset if its valid bit (bit 7)
 * matches the queue's toggle state — i.e. it was written during the
 * current pass — advancing the iterator and prefetching the next
 * entry.  Returns NULL when no new entry is available.
 */
static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		/* warm the cache for the entry we will look at next */
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}
|
||||
|
||||
/*
 * Peek at the current CQE without advancing the iterator.  Prefetches
 * the entry, then returns it only if its valid bit (bit 7) matches the
 * queue's toggle state; otherwise NULL.
 */
static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	/* read 'valid' after the prefetches have been issued */
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}
|
||||
|
||||
static inline void *hw_qeit_reset(struct hw_queue *queue)
|
||||
{
|
||||
queue->current_q_offset = 0;
|
||||
return hw_qeit_get(queue);
|
||||
}
|
||||
|
||||
/*
 * Event-queue variant of get-and-increment: fetch the current entry,
 * advance, and on wrap-around reset the offset and flip the toggle
 * state.  Unlike hw_qeit_inc this compares against the last entry
 * position rather than the total queue length.
 */
static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		/* wrap: flip the valid-bit toggle for the next pass */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}
|
||||
|
||||
/*
 * Return the current event queue entry if its valid bit (bit 7 of the
 * first byte) matches the toggle state, advancing past it; otherwise
 * NULL.
 */
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;	/* first byte carries the valid bit */
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}
|
||||
|
||||
static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
|
||||
int rq_nr)
|
||||
{
|
||||
struct hw_queue *queue;
|
||||
|
||||
if (rq_nr == 1)
|
||||
queue = &qp->hw_rqueue1;
|
||||
else if (rq_nr == 2)
|
||||
queue = &qp->hw_rqueue2;
|
||||
else
|
||||
queue = &qp->hw_rqueue3;
|
||||
|
||||
return hw_qeit_get_inc(queue);
|
||||
}
|
||||
|
||||
/*
 * Return the next send WQE of the QP and advance the send queue
 * iterator.  *wqe_index is set to the entry's index, derived from the
 * byte offset (entry size is 2^(7 + EHEA_SG_SQ) bytes).
 */
static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}
|
||||
|
||||
/*
 * Post a previously filled send WQE: issue a memory barrier so the
 * WQE contents are visible before bumping the send queue adder by one.
 * NOTE(review): iosync() is presumably the powerpc I/O ordering
 * barrier — confirm; the barrier-before-doorbell ordering is required.
 */
static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}
|
||||
|
||||
/*
 * Poll receive queue 1 for a valid entry without consuming it.
 * *wqe_index is set to the current entry index (entry size is
 * 2^(7 + EHEA_SG_RQ1) bytes); returns NULL when nothing new arrived.
 */
static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}
|
||||
|
||||
static inline void ehea_inc_cq(struct ehea_cq *cq)
|
||||
{
|
||||
hw_qeit_inc(&cq->hw_queue);
|
||||
}
|
||||
|
||||
static inline void ehea_inc_rq1(struct ehea_qp *qp)
|
||||
{
|
||||
hw_qeit_inc(&qp->hw_rqueue1);
|
||||
}
|
||||
|
||||
static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
|
||||
{
|
||||
return hw_qeit_get_valid(&my_cq->hw_queue);
|
||||
}
|
||||
|
||||
#define EHEA_CQ_REGISTER_ORIG 0
|
||||
#define EHEA_EQ_REGISTER_ORIG 0
|
||||
|
||||
/* Kinds of event queue that ehea_create_eq() can allocate. */
enum ehea_eq_type {
	EHEA_EQ = 0,	/* event queue */
	EHEA_NEQ	/* notification event queue */
};
|
||||
|
||||
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
|
||||
enum ehea_eq_type type,
|
||||
const u32 length, const u8 eqe_gen);
|
||||
|
||||
int ehea_destroy_eq(struct ehea_eq *eq);
|
||||
|
||||
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
|
||||
|
||||
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
|
||||
u64 eq_handle, u32 cq_token);
|
||||
|
||||
int ehea_destroy_cq(struct ehea_cq *cq);
|
||||
|
||||
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
|
||||
struct ehea_qp_init_attr *init_attr);
|
||||
|
||||
int ehea_destroy_qp(struct ehea_qp *qp);
|
||||
|
||||
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);
|
||||
|
||||
int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
|
||||
struct ehea_mr *shared_mr);
|
||||
|
||||
int ehea_rem_mr(struct ehea_mr *mr);
|
||||
|
||||
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
|
||||
u64 *aer, u64 *aerr);
|
||||
|
||||
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
|
||||
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
|
||||
int ehea_create_busmap(void);
|
||||
void ehea_destroy_busmap(void);
|
||||
u64 ehea_map_vaddr(void *caddr);
|
||||
|
||||
#endif /* __EHEA_QMR_H__ */
|
Reference in New Issue
Block a user