ehea/ibm*: Move the IBM drivers

Move the IBM drivers into drivers/net/ethernet/ibm/ and make the
necessary Kconfig and Makefile changes.

- Renamed ibm_new_emac to emac
- Cleaned up Makefile and Kconfig options, renaming IBM_NEW_EMAC
  references to IBM_EMAC
- The ibmlana driver is a National Semiconductor SONIC driver, so
  it was not moved

CC: Christoph Raisch <raisch@de.ibm.com>
CC: Santiago Leon <santil@linux.vnet.ibm.com>
CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: David Gibson <dwg@au1.ibm.com>
CC: Kyle Lucke <klucke@us.ibm.com>
CC: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Jeff Kirsher
2011-05-13 14:29:12 -07:00
parent 86387e1ac4
commit 9aa3283595
36 changed files with 79 additions and 52 deletions


@@ -19,6 +19,7 @@ source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"


@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/


@@ -0,0 +1,47 @@
#
# IBM device configuration.
#
config NET_VENDOR_IBM
bool "IBM devices"
depends on MCA || PPC_PSERIES || PPC_ISERIES || PPC_DCR || \
(IBMEBUS && INET && SPARSEMEM)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about IBM devices. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_IBM
config IBMVETH
tristate "IBM LAN Virtual Ethernet support"
depends on PPC_PSERIES
---help---
This driver supports virtual ethernet adapters on newer IBM iSeries
and pSeries systems.
To compile this driver as a module, choose M here. The module will
be called ibmveth.
config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"
depends on PPC_ISERIES
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
tristate "eHEA Ethernet support"
depends on IBMEBUS && INET && SPARSEMEM
select INET_LRO
---help---
This driver supports the IBM pSeries eHEA ethernet adapter.
To compile the driver as a module, choose M here. The module
will be called ehea.
endif # NET_VENDOR_IBM


@@ -0,0 +1,8 @@
#
# Makefile for the IBM network device drivers.
#
obj-$(CONFIG_IBMVETH) += ibmveth.o
obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
obj-$(CONFIG_IBM_EMAC) += emac/
obj-$(CONFIG_EHEA) += ehea/


@@ -0,0 +1,6 @@
#
# Makefile for the eHEA ethernet device driver for IBM eServer System p
#
ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o
obj-$(CONFIG_EHEA) += ehea.o


@@ -0,0 +1,504 @@
/*
* linux/drivers/net/ehea/ehea.h
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __EHEA_H__
#define __EHEA_H__
#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>
#define DRV_NAME "ehea"
#define DRV_VERSION "EHEA_0107"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD 2
#define DLPAR_MEM_REM 4
#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ 32767
#define EHEA_MIN_ENTRIES_QP 127
#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1
#define EHEA_LRO_MAX_AGGR 64
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
#define EHEA_DEF_ENTRIES_RQ1 4095
#define EHEA_DEF_ENTRIES_RQ2 1023
#define EHEA_DEF_ENTRIES_RQ3 1023
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
#define EHEA_DEF_ENTRIES_RQ1 8160
#define EHEA_DEF_ENTRIES_RQ2 2040
#define EHEA_DEF_ENTRIES_RQ3 2040
#endif
#define EHEA_MAX_ENTRIES_EQ 20
#define EHEA_SG_SQ 2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0
#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE 1522
#define EHEA_L_PKT_SIZE 256 /* low latency */
#define MAX_LRO_DESCRIPTORS 8
/* Send completion signaling */
/* Protection Domain Identifier */
#define EHEA_PD_ID 0xaabcdeff
#define EHEA_RQ2_THRESHOLD 1
#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
#define EHEA_SPEED_10G 10000
#define EHEA_SPEED_1G 1000
#define EHEA_SPEED_100M 100
#define EHEA_SPEED_10M 10
#define EHEA_SPEED_AUTONEG 0
/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL 0x08
#define EHEA_BCMC_SCOPE_SINGLE 0x00
#define EHEA_BCMC_MULTICAST 0x04
#define EHEA_BCMC_BROADCAST 0x00
#define EHEA_BCMC_UNTAGGED 0x02
#define EHEA_BCMC_TAGGED 0x00
#define EHEA_BCMC_VLANID_ALL 0x01
#define EHEA_BCMC_VLANID_SINGLE 0x00
#define EHEA_CACHE_LINE 128
/* Memory Regions */
#define EHEA_MR_ACC_CTRL 0x00800000
#define EHEA_BUSMAP_START 0x8000000000000000ULL
#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */
#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)
#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
/* utility functions */
void ehea_dump(void *adr, int len, char *msg);
#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
#define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
#define EHEA_BMASK_MASK(mask) \
(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
#define EHEA_BMASK_SET(mask, value) \
((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
#define EHEA_BMASK_GET(mask, value) \
(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
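/*
 * Editor's illustration, not part of the driver: IBM bit numbering
 * counts bit 0 as the most significant bit of the 64-bit word, so
 * EHEA_BMASK_IBM(2, 7) names a 6-bit field whose least significant
 * bit is machine bit 56 (= 63 - 7). Storing 0x30 in it and reading
 * it back shows how SET/GET round-trip:
 */
static inline int ehea_bmask_example(void)
{
	u64 eqe = EHEA_BMASK_SET(EHEA_BMASK_IBM(2, 7), 0x30);

	/* 0x30 lands at bits 61..56, i.e. eqe == 0x3000000000000000ULL */
	return EHEA_BMASK_GET(EHEA_BMASK_IBM(2, 7), eqe) == 0x30;
}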
/*
* Generic ehea page
*/
struct ehea_page {
u8 entries[PAGE_SIZE];
};
/*
* Generic queue in linux kernel virtual memory
*/
struct hw_queue {
u64 current_q_offset; /* current queue entry */
struct ehea_page **queue_pages; /* array of pages belonging to queue */
u32 qe_size; /* queue entry size */
u32 queue_length; /* queue length allocated in bytes */
u32 pagesize;
u32 toggle_state; /* toggle flag - per page */
u32 reserved; /* 64 bit alignment */
};
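/*
 * Editor's sketch of how such a queue is walked (assumed semantics;
 * the driver's real iterators are the hw_qeit_* helpers in
 * ehea_qmr.h): step current_q_offset entry by entry and flip
 * toggle_state on wrap-around so fresh entries can be distinguished
 * from stale ones.
 */
static inline void *hw_queue_next_sketch(struct hw_queue *queue)
{
	struct ehea_page *page =
		queue->queue_pages[queue->current_q_offset / queue->pagesize];
	void *entry = &page->entries[queue->current_q_offset % queue->pagesize];

	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		queue->toggle_state ^= 1;	/* wrapped */
	}
	return entry;
}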
/*
* For pSeries this is a 64bit memory address where
* I/O memory is mapped into CPU address space
*/
struct h_epa {
void __iomem *addr;
};
struct h_epa_user {
u64 addr;
};
struct h_epas {
struct h_epa kernel; /* kernel space accessible resource,
set to 0 if unused */
struct h_epa_user user; /* user space accessible resource
set to 0 if unused */
};
/*
* Memory map data structures
*/
struct ehea_dir_bmap
{
u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap
{
struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap
{
struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};
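/*
 * Editor's sketch (assumed walk; the real lookup lives in ehea_qmr.c):
 * the busmap is a three-level radix tree over memory-section indexes,
 * consuming EHEA_DIR_INDEX_SHIFT bits of the index per level.
 */
static inline u64 ehea_bmap_lookup_sketch(struct ehea_bmap *bmap,
					  u64 sec_index)
{
	int top = (sec_index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int dir = (sec_index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int idx = sec_index & EHEA_INDEX_MASK;

	if (!bmap->top[top] || !bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;	/* section not registered */

	return bmap->top[top]->dir[dir]->ent[idx];
}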
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;
/*
* Queue attributes passed to ehea_create_qp()
*/
struct ehea_qp_init_attr {
/* input parameter */
u32 qp_token; /* queue token */
u8 low_lat_rq1;
u8 signalingtype; /* cqe generation flag */
u8 rq_count; /* num of receive queues */
u8 eqe_gen; /* eqe generation flag */
u16 max_nr_send_wqes; /* max number of send wqes */
u16 max_nr_rwqes_rq1; /* max number of receive wqes */
u16 max_nr_rwqes_rq2;
u16 max_nr_rwqes_rq3;
u8 wqe_size_enc_sq;
u8 wqe_size_enc_rq1;
u8 wqe_size_enc_rq2;
u8 wqe_size_enc_rq3;
u8 swqe_imm_data_len; /* immediate data length for swqes */
u16 port_nr;
u16 rq2_threshold;
u16 rq3_threshold;
u64 send_cq_handle;
u64 recv_cq_handle;
u64 aff_eq_handle;
/* output parameter */
u32 qp_nr;
u16 act_nr_send_wqes;
u16 act_nr_rwqes_rq1;
u16 act_nr_rwqes_rq2;
u16 act_nr_rwqes_rq3;
u8 act_wqe_size_enc_sq;
u8 act_wqe_size_enc_rq1;
u8 act_wqe_size_enc_rq2;
u8 act_wqe_size_enc_rq3;
u32 nr_sq_pages;
u32 nr_rq1_pages;
u32 nr_rq2_pages;
u32 nr_rq3_pages;
u32 liobn_sq;
u32 liobn_rq1;
u32 liobn_rq2;
u32 liobn_rq3;
};
/*
* Event Queue attributes, passed as parameter
*/
struct ehea_eq_attr {
u32 type;
u32 max_nr_of_eqes;
u8 eqe_gen; /* generate eqe flag */
u64 eq_handle;
u32 act_nr_of_eqes;
u32 nr_pages;
u32 ist1; /* Interrupt service token */
u32 ist2;
u32 ist3;
u32 ist4;
};
/*
* Event Queue
*/
struct ehea_eq {
struct ehea_adapter *adapter;
struct hw_queue hw_queue;
u64 fw_handle;
struct h_epas epas;
spinlock_t spinlock;
struct ehea_eq_attr attr;
};
/*
* HEA Queues
*/
struct ehea_qp {
struct ehea_adapter *adapter;
u64 fw_handle; /* QP handle for firmware calls */
struct hw_queue hw_squeue;
struct hw_queue hw_rqueue1;
struct hw_queue hw_rqueue2;
struct hw_queue hw_rqueue3;
struct h_epas epas;
struct ehea_qp_init_attr init_attr;
};
/*
* Completion Queue attributes
*/
struct ehea_cq_attr {
/* input parameter */
u32 max_nr_of_cqes;
u32 cq_token;
u64 eq_handle;
/* output parameter */
u32 act_nr_of_cqes;
u32 nr_pages;
};
/*
* Completion Queue
*/
struct ehea_cq {
struct ehea_adapter *adapter;
u64 fw_handle;
struct hw_queue hw_queue;
struct h_epas epas;
struct ehea_cq_attr attr;
};
/*
* Memory Region
*/
struct ehea_mr {
struct ehea_adapter *adapter;
u64 handle;
u64 vaddr;
u32 lkey;
};
/*
* Port state information
*/
struct port_stats {
int poll_receive_errors;
int queue_stopped;
int err_tcp_cksum;
int err_ip_cksum;
int err_frame_crc;
};
#define EHEA_IRQ_NAME_SIZE 20
/*
* Queue SKB Array
*/
struct ehea_q_skb_arr {
struct sk_buff **arr; /* skb array for queue */
int len; /* array length */
int index; /* array index */
int os_skbs; /* rq2/rq3 only: outstanding skbs */
};
/*
* Port resources
*/
struct ehea_port_res {
struct napi_struct napi;
struct port_stats p_stats;
struct ehea_mr send_mr; /* send memory region */
struct ehea_mr recv_mr; /* receive memory region */
spinlock_t xmit_lock;
struct ehea_port *port;
char int_recv_name[EHEA_IRQ_NAME_SIZE];
char int_send_name[EHEA_IRQ_NAME_SIZE];
struct ehea_qp *qp;
struct ehea_cq *send_cq;
struct ehea_cq *recv_cq;
struct ehea_eq *eq;
struct ehea_q_skb_arr rq1_skba;
struct ehea_q_skb_arr rq2_skba;
struct ehea_q_skb_arr rq3_skba;
struct ehea_q_skb_arr sq_skba;
int sq_skba_size;
spinlock_t netif_queue;
int queue_stopped;
int swqe_refill_th;
atomic_t swqe_avail;
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
u64 tx_bytes;
u64 rx_packets;
u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
int sq_restart_flag;
};
#define EHEA_MAX_PORTS 16
#define EHEA_NUM_PORTRES_FW_HANDLES 6 /* QP handle, SendCQ handle,
RecvCQ handle, EQ handle,
SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES 1 /* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES 2 /* MR handle, NEQ handle */
struct ehea_adapter {
u64 handle;
struct platform_device *ofdev;
struct ehea_port *port[EHEA_MAX_PORTS];
struct ehea_eq *neq; /* notification event queue */
struct tasklet_struct neq_tasklet;
struct ehea_mr mr;
u32 pd; /* protection domain */
u64 max_mc_mac; /* max number of multicast mac addresses */
int active_ports;
struct list_head list;
};
struct ehea_mc_list {
struct list_head list;
u64 macaddr;
};
/* kdump support */
struct ehea_fw_handle_entry {
u64 adh; /* Adapter Handle */
u64 fwh; /* Firmware Handle */
};
struct ehea_fw_handle_array {
struct ehea_fw_handle_entry *arr;
int num_entries;
struct mutex lock;
};
struct ehea_bcmc_reg_entry {
u64 adh; /* Adapter Handle */
u32 port_id; /* Logical Port Id */
u8 reg_type; /* Registration Type */
u64 macaddr;
};
struct ehea_bcmc_reg_array {
struct ehea_bcmc_reg_entry *arr;
int num_entries;
spinlock_t lock;
};
#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
#define EHEA_PHY_LINK_UP 1
#define EHEA_PHY_LINK_DOWN 0
#define EHEA_MAX_PORT_RES 16
struct ehea_port {
struct ehea_adapter *adapter; /* adapter that owns this port */
struct net_device *netdev;
struct net_device_stats stats;
struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
struct platform_device ofdev; /* Open Firmware Device */
struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
struct ehea_eq *qp_eq;
struct work_struct reset_task;
struct mutex port_lock;
char int_aff_name[EHEA_IRQ_NAME_SIZE];
int allmulti; /* Indicates IFF_ALLMULTI state */
int promisc; /* Indicates IFF_PROMISC state */
int num_tx_qps;
int num_add_tx_qps;
int num_mcs;
int resets;
unsigned long flags;
u64 mac_addr;
u32 logical_port_id;
u32 port_speed;
u32 msg_enable;
u32 sig_comp_iv;
u32 state;
u32 lro_max_aggr;
u8 phy_link;
u8 full_duplex;
u8 autoneg;
u8 num_def_qps;
wait_queue_head_t swqe_avail_wq;
wait_queue_head_t restart_wq;
};
struct port_res_cfg {
int max_entries_rcq;
int max_entries_scq;
int max_entries_sq;
int max_entries_rq1;
int max_entries_rq2;
int max_entries_rq3;
};
enum ehea_flag_bits {
__EHEA_STOP_XFER,
__EHEA_DISABLE_PORT_RESET
};
void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
#endif /* __EHEA_H__ */


@@ -0,0 +1,295 @@
/*
* linux/drivers/net/ehea/ehea_ethtool.c
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ehea.h"
#include "ehea_phyp.h"
static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ehea_port *port = netdev_priv(dev);
u32 speed;
int ret;
ret = ehea_sense_port_attr(port);
if (ret)
return ret;
if (netif_carrier_ok(dev)) {
switch (port->port_speed) {
case EHEA_SPEED_10M:
speed = SPEED_10;
break;
case EHEA_SPEED_100M:
speed = SPEED_100;
break;
case EHEA_SPEED_1G:
speed = SPEED_1000;
break;
case EHEA_SPEED_10G:
speed = SPEED_10000;
break;
default:
speed = -1;
break; /* BUG */
}
cmd->duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
speed = ~0;
cmd->duplex = -1;
}
ethtool_cmd_speed_set(cmd, speed);
if (cmd->speed == SPEED_10000) {
cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
cmd->port = PORT_FIBRE;
} else {
cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
| SUPPORTED_TP);
cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
| ADVERTISED_TP);
cmd->port = PORT_TP;
}
cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ehea_port *port = netdev_priv(dev);
int ret = 0;
u32 sp;
if (cmd->autoneg == AUTONEG_ENABLE) {
sp = EHEA_SPEED_AUTONEG;
goto doit;
}
switch (cmd->speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_10M_F;
else
sp = H_SPEED_10M_H;
break;
case SPEED_100:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_100M_F;
else
sp = H_SPEED_100M_H;
break;
case SPEED_1000:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_1G_F;
else
ret = -EINVAL;
break;
case SPEED_10000:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_10G_F;
else
ret = -EINVAL;
break;
default:
ret = -EINVAL;
break;
}
if (ret)
goto out;
doit:
ret = ehea_set_portspeed(port, sp);
if (!ret)
netdev_info(dev,
"Port speed successfully set: %dMbps %s Duplex\n",
port->port_speed,
port->full_duplex == 1 ? "Full" : "Half");
out:
return ret;
}
static int ehea_nway_reset(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
int ret;
ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
if (!ret)
netdev_info(port->netdev,
"Port speed successfully set: %dMbps %s Duplex\n",
port->port_speed,
port->full_duplex == 1 ? "Full" : "Half");
return ret;
}
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 ehea_get_msglevel(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
return port->msg_enable;
}
static void ehea_set_msglevel(struct net_device *dev, u32 value)
{
struct ehea_port *port = netdev_priv(dev);
port->msg_enable = value;
}
static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"sig_comp_iv"},
{"swqe_refill_th"},
{"port resets"},
{"Receive errors"},
{"TCP cksum errors"},
{"IP cksum errors"},
{"Frame cksum errors"},
{"num SQ stopped"},
{"SQ stopped"},
{"PR0 free_swqes"},
{"PR1 free_swqes"},
{"PR2 free_swqes"},
{"PR3 free_swqes"},
{"PR4 free_swqes"},
{"PR5 free_swqes"},
{"PR6 free_swqes"},
{"PR7 free_swqes"},
{"LRO aggregated"},
{"LRO flushed"},
{"LRO no_desc"},
};
static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
if (stringset == ETH_SS_STATS) {
memcpy(data, &ehea_ethtool_stats_keys,
sizeof(ehea_ethtool_stats_keys));
}
}
static int ehea_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ehea_ethtool_stats_keys);
default:
return -EOPNOTSUPP;
}
}
static void ehea_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
int i, k, tmp;
struct ehea_port *port = netdev_priv(dev);
for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++)
data[i] = 0;
i = 0;
data[i++] = port->sig_comp_iv;
data[i++] = port->port_res[0].swqe_refill_th;
data[i++] = port->resets;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.poll_receive_errors;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.err_tcp_cksum;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.err_ip_cksum;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.err_frame_crc;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.queue_stopped;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].queue_stopped;
data[i++] = tmp;
for (k = 0; k < 8; k++)
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].lro_mgr.stats.aggregated;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].lro_mgr.stats.flushed;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].lro_mgr.stats.no_desc;
data[i++] = tmp;
}
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
.set_msglevel = ehea_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = ehea_get_strings,
.get_sset_count = ehea_get_sset_count,
.get_ethtool_stats = ehea_get_ethtool_stats,
.set_settings = ehea_set_settings,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
void ehea_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
}


@@ -0,0 +1,292 @@
/*
* linux/drivers/net/ehea/ehea_hw.h
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__
#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
struct ehea_qptemm {
u64 qpx_hcr;
u64 qpx_c;
u64 qpx_herr;
u64 qpx_aer;
u64 qpx_sqa;
u64 qpx_sqc;
u64 qpx_rq1a;
u64 qpx_rq1c;
u64 qpx_st;
u64 qpx_aerr;
u64 qpx_tenure;
u64 qpx_reserved1[(0x098 - 0x058) / 8];
u64 qpx_portp;
u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
u64 qpx_t;
u64 qpx_sqhp;
u64 qpx_sqptp;
u64 qpx_reserved3[(0x140 - 0x118) / 8];
u64 qpx_sqwsize;
u64 qpx_reserved4[(0x170 - 0x148) / 8];
u64 qpx_sqsize;
u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
u64 qpx_sigt;
u64 qpx_wqecnt;
u64 qpx_rq1hp;
u64 qpx_rq1ptp;
u64 qpx_rq1size;
u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
u64 qpx_rq1wsize;
u64 qpx_reserved7[(0x240 - 0x228) / 8];
u64 qpx_pd;
u64 qpx_scqn;
u64 qpx_rcqn;
u64 qpx_aeqn;
u64 reserved49;
u64 qpx_ram;
u64 qpx_reserved8[(0x300 - 0x270) / 8];
u64 qpx_rq2a;
u64 qpx_rq2c;
u64 qpx_rq2hp;
u64 qpx_rq2ptp;
u64 qpx_rq2size;
u64 qpx_rq2wsize;
u64 qpx_rq2th;
u64 qpx_rq3a;
u64 qpx_rq3c;
u64 qpx_rq3hp;
u64 qpx_rq3ptp;
u64 qpx_rq3size;
u64 qpx_rq3wsize;
u64 qpx_rq3th;
u64 qpx_lpn;
u64 qpx_reserved9[(0x400 - 0x378) / 8];
u64 reserved_ext[(0x500 - 0x400) / 8];
u64 reserved2[(0x1000 - 0x500) / 8];
};
#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)
#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)
struct ehea_mrmwmm {
u64 mrx_hcr;
u64 mrx_c;
u64 mrx_herr;
u64 mrx_aer;
u64 mrx_pp;
u64 reserved1;
u64 reserved2;
u64 reserved3;
u64 reserved4[(0x200 - 0x40) / 8];
u64 mrx_ctl[64];
};
#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)
struct ehea_qpedmm {
u64 reserved0[(0x400) / 8];
u64 qpedx_phh;
u64 qpedx_ppsgp;
u64 qpedx_ppsgu;
u64 qpedx_ppdgp;
u64 qpedx_ppdgu;
u64 qpedx_aph;
u64 qpedx_apsgp;
u64 qpedx_apsgu;
u64 qpedx_apdgp;
u64 qpedx_apdgu;
u64 qpedx_apav;
u64 qpedx_apsav;
u64 qpedx_hcr;
u64 reserved1[4];
u64 qpedx_rrl0;
u64 qpedx_rrrkey0;
u64 qpedx_rrva0;
u64 reserved2;
u64 qpedx_rrl1;
u64 qpedx_rrrkey1;
u64 qpedx_rrva1;
u64 reserved3;
u64 qpedx_rrl2;
u64 qpedx_rrrkey2;
u64 qpedx_rrva2;
u64 reserved4;
u64 qpedx_rrl3;
u64 qpedx_rrrkey3;
u64 qpedx_rrva3;
};
#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)
#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)
struct ehea_cqtemm {
u64 cqx_hcr;
u64 cqx_c;
u64 cqx_herr;
u64 cqx_aer;
u64 cqx_ptp;
u64 cqx_tp;
u64 cqx_fec;
u64 cqx_feca;
u64 cqx_ep;
u64 cqx_eq;
u64 reserved1;
u64 cqx_n0;
u64 cqx_n1;
u64 reserved2[(0x1000 - 0x60) / 8];
};
#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)
struct ehea_eqtemm {
u64 eqx_hcr;
u64 eqx_c;
u64 eqx_herr;
u64 eqx_aer;
u64 eqx_ptp;
u64 eqx_tp;
u64 eqx_ssba;
u64 eqx_psba;
u64 eqx_cec;
u64 eqx_meql;
u64 eqx_xisbi;
u64 eqx_xisc;
u64 eqx_it;
};
/*
* These access functions will be changed when the discussion about
* the new access methods for POWER has settled.
*/
static inline u64 epa_load(struct h_epa epa, u32 offset)
{
return __raw_readq((void __iomem *)(epa.addr + offset));
}
static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
__raw_writeq(value, (void __iomem *)(epa.addr + offset));
epa_load(epa, offset); /* synchronize explicitly to eHEA */
}
static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}
#define epa_store_eq(epa, offset, value)\
epa_store(epa, EQTEMM_OFFSET(offset), value)
#define epa_load_eq(epa, offset)\
epa_load(epa, EQTEMM_OFFSET(offset))
#define epa_store_cq(epa, offset, value)\
epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
epa_load(epa, CQTEMM_OFFSET(offset))
#define epa_store_qp(epa, offset, value)\
epa_store(epa, QPTEMM_OFFSET(offset), value)
#define epa_load_qp(epa, offset)\
epa_load(epa, QPTEMM_OFFSET(offset))
#define epa_store_qped(epa, offset, value)\
epa_store(epa, QPEDMM_OFFSET(offset), value)
#define epa_load_qped(epa, offset)\
epa_load(epa, QPEDMM_OFFSET(offset))
#define epa_store_mrmw(epa, offset, value)\
epa_store(epa, MRMWMM_OFFSET(offset), value)
#define epa_load_mrmw(epa, offset)\
epa_load(epa, MRMWMM_OFFSET(offset))
#define epa_store_base(epa, offset, value)\
epa_store(epa, HCAGR_OFFSET(offset), value)
#define epa_load_base(epa, offset)\
epa_load(epa, HCAGR_OFFSET(offset))
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
struct h_epa epa = qp->epas.kernel;
epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
}
static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
struct h_epa epa = qp->epas.kernel;
epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}
static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
struct h_epa epa = qp->epas.kernel;
epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
}
static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
struct h_epa epa = qp->epas.kernel;
epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}
static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
{
struct h_epa epa = cq->epas.kernel;
epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
}
static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
{
struct h_epa epa = cq->epas.kernel;
epa_store_cq(epa, cqx_n1,
EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
}
static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
{
struct h_epa epa = my_cq->epas.kernel;
epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
}
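/*
 * Editor's usage sketch (assumed context; the real refill logic is in
 * ehea_main.c): after nr_wqes fresh RWQEs have been written into RQ2,
 * the count is made visible to the adapter through the RQ2 adder.
 * Note that the *_acc store variant deliberately skips the
 * synchronizing read-back that epa_store() performs.
 */
static inline void ehea_refill_rq2_sketch(struct ehea_qp *qp, u16 nr_wqes)
{
	/* ... nr_wqes receive WQEs posted to qp->hw_rqueue2 ... */
	ehea_update_rq2a(qp, nr_wqes);	/* ring the doorbell */
}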
#endif /* __EHEA_HW_H__ */

Diff not shown because the file is too large.


@@ -0,0 +1,626 @@
/*
* linux/drivers/net/ehea/ehea_phyp.c
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ehea_phyp.h"
static inline u16 get_order_of_qentries(u16 queue_entries)
{
u8 ld = 1; /* logarithmus dualis (binary logarithm) */
while (((1U << ld) - 1) < queue_entries)
ld++;
return ld - 1;
}
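/*
 * Editor's note: eHEA queues hold 2^n - 1 entries and the hypervisor
 * expects the encoding n - 1, so the helper above rounds a request up
 * to the next such capacity:
 */
static inline int get_order_of_qentries_example(void)
{
	/* 1023 == 2^10 - 1 fits exactly; 1024 is rounded up to 2047 */
	return get_order_of_qentries(1023) == 9 &&
	       get_order_of_qentries(1024) == 10;
}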
/* Defines for H_CALL H_ALLOC_RESOURCE */
#define H_ALL_RES_TYPE_QP 1
#define H_ALL_RES_TYPE_CQ 2
#define H_ALL_RES_TYPE_EQ 3
#define H_ALL_RES_TYPE_MR 5
#define H_ALL_RES_TYPE_MW 6
static long ehea_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
unsigned long arg6,
unsigned long arg7)
{
long ret;
int i, sleep_msecs;
for (i = 0; i < 5; i++) {
ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
arg5, arg6, arg7);
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
if (ret < H_SUCCESS)
pr_err("opcode=%lx ret=%lx"
" arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
" arg5=%lx arg6=%lx arg7=%lx\n",
opcode, ret,
arg1, arg2, arg3, arg4, arg5, arg6, arg7);
return ret;
}
return H_BUSY;
}
static long ehea_plpar_hcall9(unsigned long opcode,
unsigned long *outs, /* array of 9 outputs */
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
unsigned long arg6,
unsigned long arg7,
unsigned long arg8,
unsigned long arg9)
{
long ret;
int i, sleep_msecs;
u8 cb_cat;
for (i = 0; i < 5; i++) {
ret = plpar_hcall9(opcode, outs,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);
if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
&& (opcode == H_MODIFY_HEA_PORT))
&& (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
|| (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
&& (arg3 == H_PORT_CB7_DUCQPN)))))
pr_err("opcode=%lx ret=%lx"
" arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
" arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
" arg9=%lx"
" out1=%lx out2=%lx out3=%lx out4=%lx"
" out5=%lx out6=%lx out7=%lx out8=%lx"
" out9=%lx\n",
opcode, ret,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9,
outs[0], outs[1], outs[2], outs[3], outs[4],
outs[5], outs[6], outs[7], outs[8]);
return ret;
}
return H_BUSY;
}
u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
adapter_handle, /* R4 */
qp_category, /* R5 */
qp_handle, /* R6 */
sel_mask, /* R7 */
virt_to_abs(cb_addr), /* R8 */
0, 0);
}
/* input param R5 */
#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
/* input param R9 */
#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
/* input param R10 */
#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
/* Max Receive SG Elements RQ1 */
#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
/* input param R11 */
#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
/* max swqe immediate data length */
#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
/* input param R12 */
#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
/* Threshold RQ2 */
#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
/* Threshold RQ3 */
/* output param R6 */
#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
/* output param, R7 */
#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
/* output param R8,R9 */
#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
/* output param R11,R12 */
#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr, const u32 pd,
u64 *qp_handle, struct h_epas *h_epas)
{
u64 hret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
u64 allocate_controls =
EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
| EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
| EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */
| EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */
| EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
| EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
| EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
| EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
| EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);
u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
| EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
u64 max_r10_reg =
EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
get_order_of_qentries(init_attr->max_nr_send_wqes))
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
init_attr->wqe_size_enc_rq1)
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
init_attr->wqe_size_enc_rq2)
| EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
init_attr->wqe_size_enc_rq3);
u64 r11_in =
EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
| EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
u64 threshold =
EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
| EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
allocate_controls, /* R5 */
init_attr->send_cq_handle, /* R6 */
init_attr->recv_cq_handle, /* R7 */
init_attr->aff_eq_handle, /* R8 */
r9_reg, /* R9 */
max_r10_reg, /* R10 */
r11_in, /* R11 */
threshold); /* R12 */
*qp_handle = outs[0];
init_attr->qp_nr = (u32)outs[1];
init_attr->act_nr_send_wqes =
(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
init_attr->act_nr_rwqes_rq1 =
(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
init_attr->act_nr_rwqes_rq2 =
(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
init_attr->act_nr_rwqes_rq3 =
(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
init_attr->nr_sq_pages =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
init_attr->nr_rq1_pages =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
init_attr->nr_rq2_pages =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
init_attr->nr_rq3_pages =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
init_attr->liobn_sq =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
init_attr->liobn_rq1 =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
init_attr->liobn_rq2 =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
init_attr->liobn_rq3 =
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
if (!hret)
hcp_epas_ctor(h_epas, outs[6], outs[6]);
return hret;
}
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
struct ehea_cq_attr *cq_attr,
u64 *cq_handle, struct h_epas *epas)
{
u64 hret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
H_ALL_RES_TYPE_CQ, /* R5 */
cq_attr->eq_handle, /* R6 */
cq_attr->cq_token, /* R7 */
cq_attr->max_nr_of_cqes, /* R8 */
0, 0, 0, 0); /* R9-R12 */
*cq_handle = outs[0];
cq_attr->act_nr_of_cqes = outs[3];
cq_attr->nr_pages = outs[4];
if (!hret)
hcp_epas_ctor(epas, outs[5], outs[6]);
return hret;
}
/* input param R5 */
#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
/* input param R6 */
#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
/* output param R6 */
#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
/* output param R7 */
#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
/* output param R8 */
#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
/* output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
#define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63)
/* output param R10 */
#define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63)
/* output param R11 */
#define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63)
/* output param R12 */
#define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63)
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
u64 hret, allocate_controls;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
/* resource type */
allocate_controls =
EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
| EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
| EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
| EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
allocate_controls, /* R5 */
eq_attr->max_nr_of_eqes, /* R6 */
0, 0, 0, 0, 0, 0); /* R7-R12 */
*eq_handle = outs[0];
eq_attr->act_nr_of_eqes = outs[3];
eq_attr->nr_pages = outs[4];
eq_attr->ist1 = outs[5];
eq_attr->ist2 = outs[6];
eq_attr->ist3 = outs[7];
eq_attr->ist4 = outs[8];
return hret;
}
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
const u64 qp_handle, const u64 sel_mask,
void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
u16 *out_swr, u16 *out_rwr)
{
u64 hret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
outs,
adapter_handle, /* R4 */
(u64) cat, /* R5 */
qp_handle, /* R6 */
sel_mask, /* R7 */
virt_to_abs(cb_addr), /* R8 */
0, 0, 0, 0); /* R9-R12 */
*inv_attr_id = outs[0];
*out_swr = outs[3];
*out_rwr = outs[4];
*proc_mask = outs[5];
return hret;
}
u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
const u8 queue_type, const u64 resource_handle,
const u64 log_pageaddr, u64 count)
{
u64 reg_control;
reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
| EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
adapter_handle, /* R4 */
reg_control, /* R5 */
resource_handle, /* R6 */
log_pageaddr, /* R7 */
count, /* R8 */
0, 0); /* R9-R10 */
}
u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
struct ehea_mr *mr)
{
u64 hret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_REGISTER_SMR,
outs,
adapter_handle , /* R4 */
orig_mr_handle, /* R5 */
vaddr_in, /* R6 */
(((u64)access_ctrl) << 32ULL), /* R7 */
pd, /* R8 */
0, 0, 0, 0); /* R9-R12 */
mr->handle = outs[0];
mr->lkey = (u32)outs[2];
return hret;
}
u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
outs,
adapter_handle, /* R4 */
H_DISABLE_GET_EHEA_WQE_P, /* R5 */
qp_handle, /* R6 */
0, 0, 0, 0, 0, 0); /* R7-R12 */
}
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
u64 force_bit)
{
return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle, /* R4 */
res_handle, /* R5 */
force_bit, /* R6 */
0, 0, 0, 0); /* R7-R10 */
}
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u64 length, const u32 access_ctrl,
const u32 pd, u64 *mr_handle, u32 *lkey)
{
u64 hret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
H_ALL_RES_TYPE_MR, /* R5 */
vaddr, /* R6 */
length, /* R7 */
(((u64) access_ctrl) << 32ULL), /* R8 */
pd, /* R9 */
0, 0, 0); /* R10-R12 */
*mr_handle = outs[0];
*lkey = (u32)outs[2];
return hret;
}
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u8 pagesize, const u8 queue_type,
const u64 log_pageaddr, const u64 count)
{
if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
pr_err("not on pageboundary\n");
return H_PARAMETER;
}
return ehea_h_register_rpage(adapter_handle, pagesize,
queue_type, mr_handle,
log_pageaddr, count);
}
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
u64 hret, cb_logaddr;
cb_logaddr = virt_to_abs(cb_addr);
hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
adapter_handle, /* R4 */
cb_logaddr, /* R5 */
0, 0, 0, 0, 0); /* R6-R10 */
#ifdef DEBUG
ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
return hret;
}
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
void *cb_addr)
{
u64 port_info;
u64 cb_logaddr = virt_to_abs(cb_addr);
u64 arr_index = 0;
port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
| EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
adapter_handle, /* R4 */
port_info, /* R5 */
select_mask, /* R6 */
arr_index, /* R7 */
cb_logaddr, /* R8 */
0, 0); /* R9-R10 */
}
u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
void *cb_addr)
{
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
u64 port_info;
u64 arr_index = 0;
u64 cb_logaddr = virt_to_abs(cb_addr);
port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
| EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
#ifdef DEBUG
ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
outs,
adapter_handle, /* R4 */
port_info, /* R5 */
select_mask, /* R6 */
arr_index, /* R7 */
cb_logaddr, /* R8 */
0, 0, 0, 0); /* R9-R12 */
}
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
const u8 reg_type, const u64 mc_mac_addr,
const u16 vlan_id, const u32 hcall_id)
{
u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
u64 mac_addr = mc_mac_addr >> 16;
r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
return ehea_plpar_hcall_norets(hcall_id,
adapter_handle, /* R4 */
r5_port_num, /* R5 */
r6_reg_type, /* R6 */
r7_mc_mac_addr, /* R7 */
r8_vlan_id, /* R8 */
0, 0); /* R9-R10 */
}
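/*
 * Editor's usage sketch (registration flags mirror the EHEA_BCMC_*
 * defines in ehea.h; H_REG_BCMC comes from asm/hvcall.h): register a
 * port's broadcast MAC address, untagged, for the default VLAN.
 */
static inline u64 ehea_reg_bcmc_sketch(struct ehea_adapter *adapter,
				       struct ehea_port *port)
{
	return ehea_h_reg_dereg_bcmc(adapter->handle,
				     port->logical_port_id,
				     EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED,
				     port->mac_addr, 0, H_REG_BCMC);
}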
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
const u64 event_mask)
{
return ehea_plpar_hcall_norets(H_RESET_EVENTS,
adapter_handle, /* R4 */
neq_handle, /* R5 */
event_mask, /* R6 */
0, 0, 0, 0); /* R7-R10 */
}
u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
void *rblock)
{
return ehea_plpar_hcall_norets(H_ERROR_DATA,
adapter_handle, /* R4 */
resource_handle, /* R5 */
virt_to_abs(rblock), /* R6 */
0, 0, 0, 0); /* R7-R10 */
}


@@ -0,0 +1,467 @@
/*
* linux/drivers/net/ehea/ehea_phyp.h
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __EHEA_PHYP_H__
#define __EHEA_PHYP_H__
#include <linux/delay.h>
#include <asm/hvcall.h>
#include "ehea.h"
#include "ehea_hw.h"
/* Some abbreviations used here:
*
* hcp_* - structures, variables and functions related to Hypervisor Calls
*/
static inline u32 get_longbusy_msecs(int long_busy_ret_code)
{
switch (long_busy_ret_code) {
case H_LONG_BUSY_ORDER_1_MSEC:
return 1;
case H_LONG_BUSY_ORDER_10_MSEC:
return 10;
case H_LONG_BUSY_ORDER_100_MSEC:
return 100;
case H_LONG_BUSY_ORDER_1_SEC:
return 1000;
case H_LONG_BUSY_ORDER_10_SEC:
return 10000;
case H_LONG_BUSY_ORDER_100_SEC:
return 100000;
default:
return 1;
}
}
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512
/* Notification Event Queue (NEQ) Entry bit masks */
#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
#define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16)
#define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17)
#define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18)
#define NEQE_PLID EHEA_BMASK_IBM(16, 47)
/* Notification Event Codes */
#define EHEA_EC_PORTSTATE_CHG 0x30
#define EHEA_EC_ADAPTER_MALFUNC 0x32
#define EHEA_EC_PORT_MALFUNC 0x33
/* Notification Event Log Register (NELR) bit masks */
#define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61)
#define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62)
#define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63)
static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
u64 paddr_user)
{
/* To support 64k pages we must round to 64k page boundary */
epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
(paddr_kernel & ~PAGE_MASK);
epas->user.addr = paddr_user;
}
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
}
struct hcp_modify_qp_cb0 {
u64 qp_ctl_reg; /* 00 */
u32 max_swqe; /* 02 */
u32 max_rwqe; /* 03 */
u32 port_nb; /* 04 */
u32 reserved0; /* 05 */
u64 qp_aer; /* 06 */
u64 qp_tenure; /* 08 */
};
/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
#define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5)
#define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0)
#define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1)
#define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2)
#define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3)
#define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4)
#define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5)
/* Queue Pair Control Register Status Bits */
#define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */
/* QP States: */
#define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */
#define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */
#define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
#define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */
struct hcp_modify_qp_cb1 {
u32 qpn; /* 00 */
u32 qp_asyn_ev_eq_nb; /* 01 */
u64 sq_cq_handle; /* 02 */
u64 rq_cq_handle; /* 04 */
/* sgel = scatter gather element */
u32 sgel_nb_sq; /* 06 */
u32 sgel_nb_rq1; /* 07 */
u32 sgel_nb_rq2; /* 08 */
u32 sgel_nb_rq3; /* 09 */
};
/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
#define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7)
#define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0)
#define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1)
#define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2)
#define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3)
#define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4)
#define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5)
#define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6)
#define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7)
struct hcp_query_ehea {
u32 cur_num_qps; /* 00 */
u32 cur_num_cqs; /* 01 */
u32 cur_num_eqs; /* 02 */
u32 cur_num_mrs; /* 03 */
u32 auth_level; /* 04 */
u32 max_num_qps; /* 05 */
u32 max_num_cqs; /* 06 */
u32 max_num_eqs; /* 07 */
u32 max_num_mrs; /* 08 */
u32 reserved0; /* 09 */
u32 int_clock_freq; /* 10 */
u32 max_num_pds; /* 11 */
u32 max_num_addr_handles; /* 12 */
u32 max_num_cqes; /* 13 */
u32 max_num_wqes; /* 14 */
u32 max_num_sgel_rq1wqe; /* 15 */
u32 max_num_sgel_rq2wqe; /* 16 */
u32 max_num_sgel_rq3wqe; /* 17 */
u32 mr_page_size; /* 18 */
u32 reserved1; /* 19 */
u64 max_mr_size; /* 20 */
u64 reserved2; /* 22 */
u32 num_ports; /* 24 */
u32 reserved3; /* 25 */
u32 reserved4; /* 26 */
u32 reserved5; /* 27 */
u64 max_mc_mac; /* 28 */
u64 ehea_cap; /* 30 */
u32 max_isn_per_eq; /* 32 */
u32 max_num_neq; /* 33 */
u64 max_num_vlan_ids; /* 34 */
u32 max_num_port_group; /* 36 */
u32 max_num_phys_port; /* 37 */
};
/* Hcall Query/Modify Port Control Block defines */
#define H_PORT_CB0 0
#define H_PORT_CB1 1
#define H_PORT_CB2 2
#define H_PORT_CB3 3
#define H_PORT_CB4 4
#define H_PORT_CB5 5
#define H_PORT_CB6 6
#define H_PORT_CB7 7
struct hcp_ehea_port_cb0 {
u64 port_mac_addr;
u64 port_rc;
u64 reserved0;
u32 port_op_state;
u32 port_speed;
u32 ext_swport_op_state;
u32 neg_tpf_prpf;
u32 num_default_qps;
u32 reserved1;
u64 default_qpn_arr[16];
};
/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
#define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */
#define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */
#define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */
#define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */
/* Hcall Query Port: Returned port speed values */
#define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */
#define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */
#define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */
#define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */
#define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */
#define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */
/* Port Receive Control Status Bits */
#define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49)
#define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50)
#define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51)
#define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52)
#define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53)
#define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54)
#define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55)
#define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56)
#define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57)
#define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58)
#define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59)
#define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60)
#define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61)
#define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63)
#define PXLY_RC_VLAN_FILTER 2
#define PXLY_RC_VLAN_PERM 0
#define H_PORT_CB1_ALL 0x8000000000000000ULL
struct hcp_ehea_port_cb1 {
u64 vlan_filter[64];
};
#define H_PORT_CB2_ALL 0xFFE0000000000000ULL
struct hcp_ehea_port_cb2 {
u64 rxo;
u64 rxucp;
u64 rxufd;
u64 rxuerr;
u64 rxftl;
u64 rxmcp;
u64 rxbcp;
u64 txo;
u64 txucp;
u64 txmcp;
u64 txbcp;
};
struct hcp_ehea_port_cb3 {
u64 vlan_bc_filter[64];
u64 vlan_mc_filter[64];
u64 vlan_un_filter[64];
u64 port_mac_hash_array[64];
};
#define H_PORT_CB4_ALL 0xF000000000000000ULL
#define H_PORT_CB4_JUMBO 0x1000000000000000ULL
#define H_PORT_CB4_SPEED 0x8000000000000000ULL
struct hcp_ehea_port_cb4 {
u32 port_speed;
u32 pause_frame;
u32 ens_port_op_state;
u32 jumbo_frame;
u32 ens_port_wrap;
};
/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
#define H_PORT_CB5_RCU 0x0001000000000000ULL
#define PXS_RCU EHEA_BMASK_IBM(61, 63)
struct hcp_ehea_port_cb5 {
u64 prc; /* 00 */
u64 uaa; /* 01 */
u64 macvc; /* 02 */
u64 xpcsc; /* 03 */
u64 xpcsp; /* 04 */
u64 pcsid; /* 05 */
u64 xpcsst; /* 06 */
u64 pthlb; /* 07 */
u64 pthrb; /* 08 */
u64 pqu; /* 09 */
u64 pqd; /* 10 */
u64 prt; /* 11 */
u64 wsth; /* 12 */
u64 rcb; /* 13 */
u64 rcm; /* 14 */
u64 rcu; /* 15 */
u64 macc; /* 16 */
u64 pc; /* 17 */
u64 pst; /* 18 */
u64 ducqpn; /* 19 */
u64 mcqpn; /* 20 */
u64 mma; /* 21 */
u64 pmc0h; /* 22 */
u64 pmc0l; /* 23 */
u64 lbc; /* 24 */
};
#define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL
struct hcp_ehea_port_cb6 {
u64 rxo; /* 00 */
u64 rx64; /* 01 */
u64 rx65; /* 02 */
u64 rx128; /* 03 */
u64 rx256; /* 04 */
u64 rx512; /* 05 */
u64 rx1024; /* 06 */
u64 rxbfcs; /* 07 */
u64 rxime; /* 08 */
u64 rxrle; /* 09 */
u64 rxorle; /* 10 */
u64 rxftl; /* 11 */
u64 rxjab; /* 12 */
u64 rxse; /* 13 */
u64 rxce; /* 14 */
u64 rxrf; /* 15 */
u64 rxfrag; /* 16 */
u64 rxuoc; /* 17 */
u64 rxcpf; /* 18 */
u64 rxsb; /* 19 */
u64 rxfd; /* 20 */
u64 rxoerr; /* 21 */
u64 rxaln; /* 22 */
u64 ducqpn; /* 23 */
u64 reserved0; /* 24 */
u64 rxmcp; /* 25 */
u64 rxbcp; /* 26 */
u64 txmcp; /* 27 */
u64 txbcp; /* 28 */
u64 txo; /* 29 */
u64 tx64; /* 30 */
u64 tx65; /* 31 */
u64 tx128; /* 32 */
u64 tx256; /* 33 */
u64 tx512; /* 34 */
u64 tx1024; /* 35 */
u64 txbfcs; /* 36 */
u64 txcpf; /* 37 */
u64 txlf; /* 38 */
u64 txrf; /* 39 */
u64 txime; /* 40 */
u64 txsc; /* 41 */
u64 txmc; /* 42 */
u64 txsqe; /* 43 */
u64 txdef; /* 44 */
u64 txlcol; /* 45 */
u64 txexcol; /* 46 */
u64 txcse; /* 47 */
u64 txbor; /* 48 */
};
#define H_PORT_CB7_DUCQPN 0x8000000000000000ULL
struct hcp_ehea_port_cb7 {
u64 def_uc_qpn;
};
u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
const u8 qp_category,
const u64 qp_handle, const u64 sel_mask,
void *cb_addr);
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
const u8 cat,
const u64 qp_handle,
const u64 sel_mask,
void *cb_addr,
u64 *inv_attr_id,
u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
struct ehea_eq_attr *eq_attr, u64 *eq_handle);
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
struct ehea_cq_attr *cq_attr,
u64 *cq_handle, struct h_epas *epas);
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr,
const u32 pd,
u64 *qp_handle, struct h_epas *h_epas);
#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
u64 ehea_h_register_rpage(const u64 adapter_handle,
const u8 pagesize,
const u8 queue_type,
const u64 resource_handle,
const u64 log_pageaddr, u64 count);
#define H_DISABLE_GET_EHEA_WQE_P 1
#define H_DISABLE_GET_SQ_WQE_P 2
#define H_DISABLE_GET_RQC 3
u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);
#define FORCE_FREE 1
#define NORMAL_FREE 0
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
u64 force_bit);
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u64 length, const u32 access_ctrl,
const u32 pd, u64 *mr_handle, u32 *lkey);
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u8 pagesize, const u8 queue_type,
const u64 log_pageaddr, const u64 count);
u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
struct ehea_mr *mr);
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
/* output param R5 */
#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
void *cb_addr);
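/*
 * Example (sketch): reading a port's jumbo frame state through control
 * block 4. H_PORT_CB4 is assumed to be the control block selector
 * defined with the other CB categories in this header, and cb4 a
 * pointer to a page-aligned struct hcp_ehea_port_cb4:
 *
 *	u64 hret = ehea_h_query_ehea_port(adapter->handle,
 *					  port->logical_port_id,
 *					  H_PORT_CB4, H_PORT_CB4_JUMBO,
 *					  cb4);
 */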
u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
void *cb_addr);
#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
const u8 reg_type, const u64 mc_mac_addr,
const u16 vlan_id, const u32 hcall_id);
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
const u64 event_mask);
u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
void *rblock);
#endif /* __EHEA_PHYP_H__ */

Diff content is not shown because the file is too large. Load Diff

View file

@@ -0,0 +1,404 @@
/*
* linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__
#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"
/*
* page size of ehea hardware queues
*/
#define EHEA_PAGESHIFT 12
#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT 34
#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
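/* With EHEA_PAGESHIFT = 12 this yields 4 kB queue pages, 16 MB sections
 * (EHEA_PAGES_PER_SECTION = 4096) and 16 GB huge pages.
 */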
#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
*
* WQE - Work Queue Entry
* SWQE - Send Work Queue Entry
* RWQE - Receive Work Queue Entry
* CQE - Completion Queue Entry
* EQE - Event Queue Entry
* MR - Memory Region
*/
/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE 0x1
#define EHEA_SWQE3_TYPE 0x2
#define EHEA_RWQE2_TYPE 0x3
#define EHEA_RWQE3_TYPE 0x4
#define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63)
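/*
 * Example (sketch): a wr_id for a format-2 receive WQE is composed with
 * the EHEA_BMASK_SET() helper from ehea.h and decoded again with
 * EHEA_BMASK_GET():
 *
 *	u64 wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_RWQE2_TYPE)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count);
 *
 *	int type = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 */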
struct ehea_vsgentry {
u64 vaddr;
u32 l_key;
u32 len;
};
/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES 252
#define SWQE2_MAX_IMM (0xD0 - 0x30)
#define SWQE3_MAX_IMM 224
/* tx control flags for swqe */
#define EHEA_SWQE_CRC 0x8000
#define EHEA_SWQE_IP_CHECKSUM 0x4000
#define EHEA_SWQE_TCP_CHECKSUM 0x2000
#define EHEA_SWQE_TSO 0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800
#define EHEA_SWQE_VLAN_INSERT 0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT 0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100
#define EHEA_SWQE_WRAP_CTL_REC 0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE 0x0040
#define EHEA_SWQE_BIND 0x0020
#define EHEA_SWQE_PURGE 0x0010
/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32
struct ehea_swqe {
u64 wr_id;
u16 tx_control;
u16 vlan_tag;
u8 reserved1;
u8 ip_start;
u8 ip_end;
u8 immediate_data_length;
u8 tcp_offset;
u8 reserved2;
u16 tcp_end;
u8 wrap_tag;
u8 descriptors; /* number of valid descriptors in WQE */
u16 reserved3;
u16 reserved4;
u16 mss;
u32 reserved5;
union {
/* Send WQE Format 1 */
struct {
struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
} no_immediate_data;
/* Send WQE Format 2 */
struct {
struct ehea_vsgentry sg_entry;
/* 0x30 */
u8 immediate_data[SWQE2_MAX_IMM];
/* 0xd0 */
struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
} immdata_desc __packed;
/* Send WQE Format 3 */
struct {
u8 immediate_data[SWQE3_MAX_IMM];
} immdata_nodesc;
} u;
};
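/*
 * Format selection (sketch): a TX WQE that carries the packet headers as
 * immediate data and the payload as scatter/gather descriptors uses
 * format 2, i.e. EHEA_SWQE2_TYPE in the wr_id plus
 * EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_DESCRIPTORS_PRESENT in
 * tx_control; a small frame that fits entirely into SWQE3_MAX_IMM bytes
 * can use format 3 with EHEA_SWQE_IMM_DATA_PRESENT alone.
 */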
struct ehea_rwqe {
u64 wr_id; /* work request ID */
u8 reserved1[5];
u8 data_segments;
u16 reserved2;
u64 reserved3;
u64 reserved4;
struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};
#define EHEA_CQE_VLAN_TAG_XTRACT 0x0400
#define EHEA_CQE_TYPE_RQ 0x60
#define EHEA_CQE_STAT_ERR_MASK 0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
#define EHEA_CQE_BLIND_CKSUM 0x8000
#define EHEA_CQE_STAT_ERR_TCP 0x4000
#define EHEA_CQE_STAT_ERR_IP 0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000
/* Defines which bad send CQE statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK 0x0002
struct ehea_cqe {
u64 wr_id; /* work request ID from WQE */
u8 type;
u8 valid;
u16 status;
u16 reserved1;
u16 num_bytes_transfered;
u16 vlan_tag;
u16 inet_checksum_value;
u8 reserved2;
u8 header_length;
u16 reserved3;
u16 page_offset;
u16 wqe_count;
u32 qp_token;
u32 timestamp;
u32 reserved4;
u64 reserved5[3];
};
#define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
#define EHEA_AER_RESTYPE_QP 0x8
#define EHEA_AER_RESTYPE_CQ 0x4
#define EHEA_AER_RESTYPE_EQ 0x3
/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
struct ehea_eqe {
u64 entry;
};
#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
struct ehea_page *current_page;
if (q_offset >= queue->queue_length)
q_offset -= queue->queue_length;
current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}
static inline void *hw_qeit_get(struct hw_queue *queue)
{
return hw_qeit_calc(queue, queue->current_q_offset);
}
static inline void hw_qeit_inc(struct hw_queue *queue)
{
queue->current_q_offset += queue->qe_size;
if (queue->current_q_offset >= queue->queue_length) {
queue->current_q_offset = 0;
/* toggle the valid flag */
queue->toggle_state = (~queue->toggle_state) & 1;
}
}
static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
hw_qeit_inc(queue);
return retvalue;
}
static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
struct ehea_cqe *retvalue = hw_qeit_get(queue);
u8 valid = retvalue->valid;
void *pref;
if ((valid >> 7) == (queue->toggle_state & 1)) {
/* this is a good one */
hw_qeit_inc(queue);
pref = hw_qeit_calc(queue, queue->current_q_offset);
prefetch(pref);
prefetch(pref + 128);
} else
retvalue = NULL;
return retvalue;
}
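/*
 * Typical consumer loop (sketch): entries are fetched while their valid
 * bit matches the queue's toggle state, e.g. for a completion queue
 * (process_cqe() is a hypothetical handler):
 *
 *	struct ehea_cqe *cqe;
 *
 *	while ((cqe = hw_qeit_get_inc_valid(&cq->hw_queue)) != NULL)
 *		process_cqe(cq, cqe);
 */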
static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
struct ehea_cqe *retvalue = hw_qeit_get(queue);
void *pref;
u8 valid;
pref = hw_qeit_calc(queue, queue->current_q_offset);
prefetch(pref);
prefetch(pref + 128);
prefetch(pref + 256);
valid = retvalue->valid;
if (!((valid >> 7) == (queue->toggle_state & 1)))
retvalue = NULL;
return retvalue;
}
static inline void *hw_qeit_reset(struct hw_queue *queue)
{
queue->current_q_offset = 0;
return hw_qeit_get(queue);
}
static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
u64 last_entry_in_q = queue->queue_length - queue->qe_size;
void *retvalue;
retvalue = hw_qeit_get(queue);
queue->current_q_offset += queue->qe_size;
if (queue->current_q_offset > last_entry_in_q) {
queue->current_q_offset = 0;
queue->toggle_state = (~queue->toggle_state) & 1;
}
return retvalue;
}
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
u32 qe = *(u8 *)retvalue;
if ((qe >> 7) == (queue->toggle_state & 1))
hw_qeit_eq_get_inc(queue);
else
retvalue = NULL;
return retvalue;
}
static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
int rq_nr)
{
struct hw_queue *queue;
if (rq_nr == 1)
queue = &qp->hw_rqueue1;
else if (rq_nr == 2)
queue = &qp->hw_rqueue2;
else
queue = &qp->hw_rqueue3;
return hw_qeit_get_inc(queue);
}
static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
int *wqe_index)
{
struct hw_queue *queue = &my_qp->hw_squeue;
struct ehea_swqe *wqe_p;
*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);
return wqe_p;
}
static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
iosync();
ehea_update_sqa(my_qp, 1);
}
static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
struct hw_queue *queue = &qp->hw_rqueue1;
*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
return hw_qeit_get_valid(queue);
}
static inline void ehea_inc_cq(struct ehea_cq *cq)
{
hw_qeit_inc(&cq->hw_queue);
}
static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
hw_qeit_inc(&qp->hw_rqueue1);
}
static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
return hw_qeit_get_valid(&my_cq->hw_queue);
}
#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0
enum ehea_eq_type {
EHEA_EQ = 0, /* event queue */
EHEA_NEQ /* notification event queue */
};
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
enum ehea_eq_type type,
const u32 length, const u8 eqe_gen);
int ehea_destroy_eq(struct ehea_eq *eq);
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
u64 eq_handle, u32 cq_token);
int ehea_destroy_cq(struct ehea_cq *cq);
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
struct ehea_qp_init_attr *init_attr);
int ehea_destroy_qp(struct ehea_qp *qp);
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);
int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
struct ehea_mr *shared_mr);
int ehea_rem_mr(struct ehea_mr *mr);
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
u64 *aer, u64 *aerr);
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
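/*
 * Typical bring-up order (sketch, error handling omitted): create the
 * event queue first, then the completion queues that report into it,
 * then the queue pair using those CQs; teardown runs in reverse.
 * fw_handle is assumed to be the firmware handle member of struct
 * ehea_eq in ehea.h:
 *
 *	eq = ehea_create_eq(adapter, EHEA_EQ, eq_entries, 1);
 *	cq = ehea_create_cq(adapter, nr_cqe, eq->fw_handle, cq_token);
 *	qp = ehea_create_qp(adapter, pd, &init_attr);
 */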
#endif /* __EHEA_QMR_H__ */

View file

@@ -0,0 +1,76 @@
config IBM_EMAC
tristate "IBM EMAC Ethernet support"
depends on PPC_DCR
select CRC32
help
This driver supports the IBM EMAC family of Ethernet controllers
typically found on 4xx embedded PowerPC chips, but also on the
Axon southbridge for Cell.
config IBM_EMAC_RXB
int "Number of receive buffers"
depends on IBM_EMAC
default "128"
config IBM_EMAC_TXB
int "Number of transmit buffers"
depends on IBM_EMAC
default "64"
config IBM_EMAC_POLL_WEIGHT
int "MAL NAPI polling weight"
depends on IBM_EMAC
default "32"
config IBM_EMAC_RX_COPY_THRESHOLD
int "RX skb copy threshold (bytes)"
depends on IBM_EMAC
default "256"
config IBM_EMAC_RX_SKB_HEADROOM
int "Additional RX skb headroom (bytes)"
depends on IBM_EMAC
default "0"
help
Additional receive skb headroom. Note that the driver
will always reserve at least 2 bytes to keep the IP header
aligned, so there is usually no need to add any extra
headroom.
If unsure, set to 0.
config IBM_EMAC_DEBUG
bool "Debugging"
depends on IBM_EMAC
default n
# The options below have to be select'ed by the respective
# processor types or platforms (see the example at the end of this file)
config IBM_EMAC_ZMII
bool
default n
config IBM_EMAC_RGMII
bool
default n
config IBM_EMAC_TAH
bool
default n
config IBM_EMAC_EMAC4
bool
default n
config IBM_EMAC_NO_FLOW_CTRL
bool
default n
config IBM_EMAC_MAL_CLR_ICINTSTAT
bool
default n
config IBM_EMAC_MAL_COMMON_ERR
bool
default n
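# Example (sketch): a platform does not set these options directly; its
# own Kconfig entry selects them, e.g. for a hypothetical 440EPx board
# symbol:
#
#	config MY_44x_BOARD
#		bool "My 440EPx board"
#		select IBM_EMAC_RGMII
#		select IBM_EMAC_ZMII
#		select IBM_EMAC_EMAC4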

View file

@@ -0,0 +1,11 @@
#
# Makefile for the PowerPC 4xx on-chip ethernet driver
#
obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
ibm_emac-y := mal.o core.o phy.o
ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o
ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o

Diff content is not shown because the file is too large. Load Diff

View file

@@ -0,0 +1,462 @@
/*
* drivers/net/ethernet/ibm/emac/core.h
*
* Driver for PowerPC 4xx on-chip ethernet controller.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Armin Kuster <akuster@mvista.com>
* Johnnie Peters <jpeters@mvista.com>
* Copyright 2000, 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __IBM_NEWEMAC_CORE_H
#define __IBM_NEWEMAC_CORE_H
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/dcr.h>
#include "emac.h"
#include "phy.h"
#include "zmii.h"
#include "rgmii.h"
#include "mal.h"
#include "tah.h"
#include "debug.h"
#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
/* Simple sanity check */
#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
#error Invalid number of buffer descriptors (greater than 256)
#endif
#define EMAC_MIN_MTU 46
/* Maximum L2 header length (VLAN tagged, no FCS) */
#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
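/* i.e. two 6-byte MAC addresses + 2-byte ethertype + 4-byte VLAN tag = 18 */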
/* RX BD size for the given MTU */
static inline int emac_rx_size(int mtu)
{
if (mtu > ETH_DATA_LEN)
return MAL_MAX_RX_SIZE;
else
return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
}
#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
#define EMAC_RX_SKB_HEADROOM \
EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
/* Size of RX skb for the given MTU */
static inline int emac_rx_skb_size(int mtu)
{
int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
}
/* RX DMA sync size */
static inline int emac_rx_sync_size(int mtu)
{
return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
}
/* Driver statistics are split into two parts to make them more cache friendly:
 * - normal statistics (packet count, etc)
 * - error statistics
 *
 * When statistics are requested by ethtool, these parts are concatenated;
 * the normal part goes first.
*
* Please, keep these structures in sync with emac_stats_keys.
*/
/* Normal TX/RX Statistics */
struct emac_stats {
u64 rx_packets;
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
u64 rx_packets_csum;
u64 tx_packets_csum;
};
/* Error statistics */
struct emac_error_stats {
u64 tx_undo;
/* Software RX Errors */
u64 rx_dropped_stack;
u64 rx_dropped_oom;
u64 rx_dropped_error;
u64 rx_dropped_resize;
u64 rx_dropped_mtu;
u64 rx_stopped;
/* BD reported RX errors */
u64 rx_bd_errors;
u64 rx_bd_overrun;
u64 rx_bd_bad_packet;
u64 rx_bd_runt_packet;
u64 rx_bd_short_event;
u64 rx_bd_alignment_error;
u64 rx_bd_bad_fcs;
u64 rx_bd_packet_too_long;
u64 rx_bd_out_of_range;
u64 rx_bd_in_range;
/* EMAC IRQ reported RX errors */
u64 rx_parity;
u64 rx_fifo_overrun;
u64 rx_overrun;
u64 rx_bad_packet;
u64 rx_runt_packet;
u64 rx_short_event;
u64 rx_alignment_error;
u64 rx_bad_fcs;
u64 rx_packet_too_long;
u64 rx_out_of_range;
u64 rx_in_range;
/* Software TX Errors */
u64 tx_dropped;
/* BD reported TX errors */
u64 tx_bd_errors;
u64 tx_bd_bad_fcs;
u64 tx_bd_carrier_loss;
u64 tx_bd_excessive_deferral;
u64 tx_bd_excessive_collisions;
u64 tx_bd_late_collision;
u64 tx_bd_multple_collisions;
u64 tx_bd_single_collision;
u64 tx_bd_underrun;
u64 tx_bd_sqe;
/* EMAC IRQ reported TX errors */
u64 tx_parity;
u64 tx_underrun;
u64 tx_sqe;
u64 tx_errors;
};
#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
sizeof(struct emac_error_stats)) \
/ sizeof(u64))
struct emac_instance {
struct net_device *ndev;
struct resource rsrc_regs;
struct emac_regs __iomem *emacp;
struct platform_device *ofdev;
struct device_node **blist; /* bootlist entry */
/* MAL linkage */
u32 mal_ph;
struct platform_device *mal_dev;
u32 mal_rx_chan;
u32 mal_tx_chan;
struct mal_instance *mal;
struct mal_commac commac;
/* PHY infos */
u32 phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
struct mii_phy phy;
struct mutex link_lock;
struct delayed_work link_work;
int link_polling;
/* GPCS PHY infos */
u32 gpcs_address;
/* Shared MDIO if any */
u32 mdio_ph;
struct platform_device *mdio_dev;
struct emac_instance *mdio_instance;
struct mutex mdio_lock;
/* ZMII infos if any */
u32 zmii_ph;
u32 zmii_port;
struct platform_device *zmii_dev;
/* RGMII infos if any */
u32 rgmii_ph;
u32 rgmii_port;
struct platform_device *rgmii_dev;
/* TAH infos if any */
u32 tah_ph;
u32 tah_port;
struct platform_device *tah_dev;
/* IRQs */
int wol_irq;
int emac_irq;
/* OPB bus frequency in MHz */
u32 opb_bus_freq;
/* Cell index within an ASIC (for clk mgmnt) */
u32 cell_index;
/* Max supported MTU */
u32 max_mtu;
/* Feature bits (from probe table) */
unsigned int features;
/* Tx and Rx fifo sizes & other infos in bytes */
u32 tx_fifo_size;
u32 tx_fifo_size_gige;
u32 rx_fifo_size;
u32 rx_fifo_size_gige;
u32 fifo_entry_size;
u32 mal_burst_size; /* move to MAL ? */
/* IAHT and GAHT filter parameterization */
u32 xaht_slots_shift;
u32 xaht_width_shift;
/* Descriptor management
*/
struct mal_descriptor *tx_desc;
int tx_cnt;
int tx_slot;
int ack_slot;
struct mal_descriptor *rx_desc;
int rx_slot;
struct sk_buff *rx_sg_skb; /* 1 */
int rx_skb_size;
int rx_sync_size;
struct sk_buff *tx_skb[NUM_TX_BUFF];
struct sk_buff *rx_skb[NUM_RX_BUFF];
/* Stats
*/
struct emac_error_stats estats;
struct net_device_stats nstats;
struct emac_stats stats;
/* Misc
*/
int reset_failed;
int stop_timeout; /* in us */
int no_mcast;
int mcast_pending;
int opened;
struct work_struct reset_work;
spinlock_t lock;
};
/*
* Features of various EMAC implementations
*/
/*
* No flow control on 40x according to the original driver
*/
#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001
/*
* Cell is an EMAC4
*/
#define EMAC_FTR_EMAC4 0x00000002
/*
* For the 440SPe, AMCC inexplicably changed the polarity of
* the "operation complete" bit in the MII control register.
*/
#define EMAC_FTR_STACR_OC_INVERT 0x00000004
/*
* Set if we have a TAH.
*/
#define EMAC_FTR_HAS_TAH 0x00000008
/*
* Set if we have a ZMII.
*/
#define EMAC_FTR_HAS_ZMII 0x00000010
/*
* Set if we have a RGMII.
*/
#define EMAC_FTR_HAS_RGMII 0x00000020
/*
* Set if we have new type STACR with STAOPC
*/
#define EMAC_FTR_HAS_NEW_STACR 0x00000040
/*
* Set if we need phy clock workaround for 440gx
*/
#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080
/*
* Set if we need phy clock workaround for 440ep or 440gr
*/
#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
/*
* The 405EX and 460EX contain the EMAC4SYNC core
*/
#define EMAC_FTR_EMAC4SYNC 0x00000200
/*
* Set if we need phy clock workaround for 460ex or 460gt
*/
#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400
/* Right now, we don't quite handle the always/possible masks on the
* most optimal way as we don't have a way to say something like
* always EMAC4. Patches welcome.
*/
enum {
EMAC_FTRS_ALWAYS = 0,
EMAC_FTRS_POSSIBLE =
#ifdef CONFIG_IBM_EMAC_EMAC4
EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC |
EMAC_FTR_HAS_NEW_STACR |
EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
#endif
#ifdef CONFIG_IBM_EMAC_TAH
EMAC_FTR_HAS_TAH |
#endif
#ifdef CONFIG_IBM_EMAC_ZMII
EMAC_FTR_HAS_ZMII |
#endif
#ifdef CONFIG_IBM_EMAC_RGMII
EMAC_FTR_HAS_RGMII |
#endif
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
EMAC_FTR_NO_FLOW_CONTROL_40x |
#endif
EMAC_FTR_460EX_PHY_CLK_FIX |
EMAC_FTR_440EP_PHY_CLK_FIX,
};
static inline int emac_has_feature(struct emac_instance *dev,
unsigned long feature)
{
return (EMAC_FTRS_ALWAYS & feature) ||
(EMAC_FTRS_POSSIBLE & dev->features & feature);
}
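/*
 * Since EMAC_FTRS_ALWAYS and EMAC_FTRS_POSSIBLE are compile-time
 * constants, the test above folds: a feature set in EMAC_FTRS_ALWAYS
 * becomes a constant 1, a feature absent from EMAC_FTRS_POSSIBLE a
 * constant 0, and the compiler drops the dead code. Only features that
 * are possible but not guaranteed cost a runtime check of dev->features.
 */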
/*
* Various instances of the EMAC core have varying 1) number of
* address match slots, 2) width of the registers for handling address
* match slots, 3) number of registers for handling address match
* slots and 4) base offset for those registers.
*
* These macros and inlines handle these differences based on
* parameters supplied by the device structure which are, in turn,
* initialized based on the "compatible" entry in the device tree.
*/
#define EMAC4_XAHT_SLOTS_SHIFT 6
#define EMAC4_XAHT_WIDTH_SHIFT 4
#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
(dev)->xaht_width_shift))
#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \
((EMAC_XAHT_SLOTS(dev) - 1) - \
((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \
(dev)->xaht_slots_shift)))
#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \
((slot) >> (dev)->xaht_width_shift)
#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \
((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
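/*
 * Example (sketch): mapping a multicast address to its hash-table bit,
 * in the style of the driver's multicast filter setup; ether_crc() is
 * from <linux/crc32.h>, and gaht[] stands in for the per-device GAHT
 * register shadow:
 *
 *	int slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, addr));
 *	int reg  = EMAC_XAHT_SLOT_TO_REG(dev, slot);
 *	u32 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
 *
 *	gaht[reg] |= mask;
 */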
static inline u32 *emac_xaht_base(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int offset;
/* The first IAHT entry always is the base of the block of
* IAHT and GAHT registers.
*/
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC))
offset = offsetof(struct emac_regs, u1.emac4sync.iaht1);
else
offset = offsetof(struct emac_regs, u0.emac4.iaht1);
return (u32 *)((ptrdiff_t)p + offset);
}
static inline u32 *emac_gaht_base(struct emac_instance *dev)
{
/* GAHT registers always come after an identical number of
* IAHT registers.
*/
return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
}
static inline u32 *emac_iaht_base(struct emac_instance *dev)
{
/* IAHT registers always come before an identical number of
* GAHT registers.
*/
return emac_xaht_base(dev);
}
/* Ethtool get_regs complex data.
* We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
* when available.
*
* Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
* MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
* Each register component is preceded with emac_ethtool_regs_subhdr.
* Order of the optional headers follows their relative bit positions
* in emac_ethtool_regs_hdr.components
*/
#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
#define EMAC_ETHTOOL_REGS_TAH 0x00000004
struct emac_ethtool_regs_hdr {
u32 components;
};
struct emac_ethtool_regs_subhdr {
u32 version;
u32 index;
};
#define EMAC_ETHTOOL_REGS_VER 0
#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
(dev)->rsrc_regs.start + 1)
#define EMAC4_ETHTOOL_REGS_VER 1
#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
(dev)->rsrc_regs.start + 1)
#endif /* __IBM_NEWEMAC_CORE_H */

View file

@@ -0,0 +1,270 @@
/*
* drivers/net/ethernet/ibm/emac/debug.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sysrq.h>
#include <asm/io.h>
#include "core.h"
static DEFINE_SPINLOCK(emac_dbg_lock);
static void emac_desc_dump(struct emac_instance *p)
{
int i;
printk("** EMAC %s TX BDs **\n"
" tx_cnt = %d tx_slot = %d ack_slot = %d\n",
p->ofdev->dev.of_node->full_name,
p->tx_cnt, p->tx_slot, p->ack_slot);
for (i = 0; i < NUM_TX_BUFF / 2; ++i)
printk
("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
NUM_TX_BUFF / 2 + i,
p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
printk("** EMAC %s RX BDs **\n"
" rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
" rx_sg_skb = 0x%p\n",
p->ofdev->dev.of_node->full_name,
p->rx_slot, p->commac.flags, p->rx_skb_size,
p->rx_sync_size, p->rx_sg_skb);
for (i = 0; i < NUM_RX_BUFF / 2; ++i)
printk
("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
NUM_RX_BUFF / 2 + i,
p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
}
static void emac_mac_dump(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
const int xaht_regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
u32 *iaht_base = emac_iaht_base(dev);
int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
int n;
printk("** EMAC %s registers **\n"
"MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
"RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
"IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
dev->ofdev->dev.of_node->full_name,
in_be32(&p->mr0), in_be32(&p->mr1),
in_be32(&p->tmr0), in_be32(&p->tmr1),
in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
in_be32(&p->vtci)
);
if (emac4sync)
printk("MAR = %04x%08x MMAR = %04x%08x\n",
in_be32(&p->u0.emac4sync.mahr),
in_be32(&p->u0.emac4sync.malr),
in_be32(&p->u0.emac4sync.mmahr),
in_be32(&p->u0.emac4sync.mmalr)
);
for (n = 0; n < xaht_regs; n++)
printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
for (n = 0; n < xaht_regs; n++)
printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
printk("LSA = %04x%08x IPGVR = 0x%04x\n"
"STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
"OCTX = 0x%08x OCRX = 0x%08x\n",
in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
in_be32(&p->octx), in_be32(&p->ocrx)
);
if (!emac4sync) {
printk("IPCR = 0x%08x\n",
in_be32(&p->u1.emac4.ipcr)
);
} else {
printk("REVID = 0x%08x TPC = 0x%08x\n",
in_be32(&p->u1.emac4sync.revid),
in_be32(&p->u1.emac4sync.tpc)
);
}
emac_desc_dump(dev);
}
static void emac_mal_dump(struct mal_instance *mal)
{
int i;
printk("** MAL %s Registers **\n"
"CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
"TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
"RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
mal->ofdev->dev.of_node->full_name,
get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
get_mal_dcrn(mal, MAL_IER),
get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
);
printk("TX|");
for (i = 0; i < mal->num_tx_chans; ++i) {
if (i && !(i % 4))
printk("\n ");
printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
}
printk("\nRX|");
for (i = 0; i < mal->num_rx_chans; ++i) {
if (i && !(i % 4))
printk("\n ");
printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
}
printk("\n ");
for (i = 0; i < mal->num_rx_chans; ++i) {
u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
if (i && !(i % 3))
printk("\n ");
printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
}
printk("\n");
}
static struct emac_instance *__emacs[4];
static struct mal_instance *__mals[1];
void emac_dbg_register(struct emac_instance *dev)
{
unsigned long flags;
int i;
spin_lock_irqsave(&emac_dbg_lock, flags);
for (i = 0; i < ARRAY_SIZE(__emacs); i++)
if (__emacs[i] == NULL) {
__emacs[i] = dev;
break;
}
spin_unlock_irqrestore(&emac_dbg_lock, flags);
}
void emac_dbg_unregister(struct emac_instance *dev)
{
unsigned long flags;
int i;
spin_lock_irqsave(&emac_dbg_lock, flags);
for (i = 0; i < ARRAY_SIZE(__emacs); i++)
if (__emacs[i] == dev) {
__emacs[i] = NULL;
break;
}
spin_unlock_irqrestore(&emac_dbg_lock, flags);
}
void mal_dbg_register(struct mal_instance *mal)
{
unsigned long flags;
int i;
spin_lock_irqsave(&emac_dbg_lock, flags);
for (i = 0; i < ARRAY_SIZE(__mals); i++)
if (__mals[i] == NULL) {
__mals[i] = mal;
break;
}
spin_unlock_irqrestore(&emac_dbg_lock, flags);
}
void mal_dbg_unregister(struct mal_instance *mal)
{
unsigned long flags;
int i;
spin_lock_irqsave(&emac_dbg_lock, flags);
for (i = 0; i < ARRAY_SIZE(__mals); i++)
if (__mals[i] == mal) {
__mals[i] = NULL;
break;
}
spin_unlock_irqrestore(&emac_dbg_lock, flags);
}
void emac_dbg_dump_all(void)
{
unsigned int i;
unsigned long flags;
spin_lock_irqsave(&emac_dbg_lock, flags);
for (i = 0; i < ARRAY_SIZE(__mals); ++i)
if (__mals[i])
emac_mal_dump(__mals[i]);
for (i = 0; i < ARRAY_SIZE(__emacs); ++i)
if (__emacs[i])
emac_mac_dump(__emacs[i]);
spin_unlock_irqrestore(&emac_dbg_lock, flags);
}
#if defined(CONFIG_MAGIC_SYSRQ)
static void emac_sysrq_handler(int key)
{
emac_dbg_dump_all();
}
static struct sysrq_key_op emac_sysrq_op = {
.handler = emac_sysrq_handler,
.help_msg = "emaC",
.action_msg = "Show EMAC(s) status",
};
int __init emac_init_debug(void)
{
return register_sysrq_key('c', &emac_sysrq_op);
}
void __exit emac_fini_debug(void)
{
unregister_sysrq_key('c', &emac_sysrq_op);
}
#else
int __init emac_init_debug(void)
{
return 0;
}
void __exit emac_fini_debug(void)
{
}
#endif /* CONFIG_MAGIC_SYSRQ */

View file

@@ -0,0 +1,83 @@
/*
* drivers/net/ethernet/ibm/emac/debug.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __IBM_NEWEMAC_DEBUG_H
#define __IBM_NEWEMAC_DEBUG_H
#include <linux/init.h>
#include "core.h"
#if defined(CONFIG_IBM_EMAC_DEBUG)
struct emac_instance;
struct mal_instance;
extern void emac_dbg_register(struct emac_instance *dev);
extern void emac_dbg_unregister(struct emac_instance *dev);
extern void mal_dbg_register(struct mal_instance *mal);
extern void mal_dbg_unregister(struct mal_instance *mal);
extern int emac_init_debug(void) __init;
extern void emac_fini_debug(void) __exit;
extern void emac_dbg_dump_all(void);
# define DBG_LEVEL 1
#else
# define emac_dbg_register(x) do { } while(0)
# define emac_dbg_unregister(x) do { } while(0)
# define mal_dbg_register(x) do { } while(0)
# define mal_dbg_unregister(x) do { } while(0)
# define emac_init_debug() do { } while(0)
# define emac_fini_debug() do { } while(0)
# define emac_dbg_dump_all() do { } while(0)
# define DBG_LEVEL 0
#endif
#define EMAC_DBG(d, name, fmt, arg...) \
printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
#if DBG_LEVEL > 0
# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x)
# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x)
# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x)
# define NL "\n"
#else
# define DBG(d,f,x...) ((void)0)
# define MAL_DBG(d,f,x...) ((void)0)
# define ZMII_DBG(d,f,x...) ((void)0)
# define RGMII_DBG(d,f,x...) ((void)0)
#endif
#if DBG_LEVEL > 1
# define DBG2(d,f,x...) DBG(d,f, ##x)
# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x)
# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x)
# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x)
#else
# define DBG2(d,f,x...) ((void)0)
# define MAL_DBG2(d,f,x...) ((void)0)
# define ZMII_DBG2(d,f,x...) ((void)0)
# define RGMII_DBG2(d,f,x...) ((void)0)
#endif
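/*
 * Usage (sketch): callers pass the instance first, then a format string
 * terminated with the NL macro, e.g.:
 *
 *	DBG(dev, "reset" NL);
 *	MAL_DBG2(mal, "poll(%d)" NL, budget);
 */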
#endif /* __IBM_NEWEMAC_DEBUG_H */

View file

@@ -0,0 +1,312 @@
/*
* drivers/net/ethernet/ibm/emac/emac.h
*
* Register definitions for PowerPC 4xx on-chip ethernet controller
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Matt Porter <mporter@kernel.crashing.org>
* Armin Kuster <akuster@mvista.com>
* Copyright 2002-2004 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __IBM_NEWEMAC_H
#define __IBM_NEWEMAC_H
#include <linux/types.h>
#include <linux/phy.h>
/* EMAC registers Write Access rules */
struct emac_regs {
/* Common registers across all EMAC implementations. */
u32 mr0; /* Special */
u32 mr1; /* Reset */
u32 tmr0; /* Special */
u32 tmr1; /* Special */
u32 rmr; /* Reset */
u32 isr; /* Always */
u32 iser; /* Reset */
u32 iahr; /* Reset, R, T */
u32 ialr; /* Reset, R, T */
u32 vtpid; /* Reset, R, T */
u32 vtci; /* Reset, R, T */
u32 ptr; /* Reset, T */
union {
/* Registers unique to EMAC4 implementations */
struct {
u32 iaht1; /* Reset, R */
u32 iaht2; /* Reset, R */
u32 iaht3; /* Reset, R */
u32 iaht4; /* Reset, R */
u32 gaht1; /* Reset, R */
u32 gaht2; /* Reset, R */
u32 gaht3; /* Reset, R */
u32 gaht4; /* Reset, R */
} emac4;
/* Registers unique to EMAC4SYNC implementations */
struct {
u32 mahr; /* Reset, R, T */
u32 malr; /* Reset, R, T */
u32 mmahr; /* Reset, R, T */
u32 mmalr; /* Reset, R, T */
u32 rsvd0[4];
} emac4sync;
} u0;
/* Common registers across all EMAC implementations. */
u32 lsah;
u32 lsal;
u32 ipgvr; /* Reset, T */
u32 stacr; /* Special */
u32 trtr; /* Special */
u32 rwmr; /* Reset */
u32 octx;
u32 ocrx;
union {
/* Registers unique to EMAC4 implementations */
struct {
u32 ipcr;
} emac4;
/* Registers unique to EMAC4SYNC implementations */
struct {
u32 rsvd1;
u32 revid;
u32 rsvd2[2];
u32 iaht1; /* Reset, R */
u32 iaht2; /* Reset, R */
u32 iaht3; /* Reset, R */
u32 iaht4; /* Reset, R */
u32 iaht5; /* Reset, R */
u32 iaht6; /* Reset, R */
u32 iaht7; /* Reset, R */
u32 iaht8; /* Reset, R */
u32 gaht1; /* Reset, R */
u32 gaht2; /* Reset, R */
u32 gaht3; /* Reset, R */
u32 gaht4; /* Reset, R */
u32 gaht5; /* Reset, R */
u32 gaht6; /* Reset, R */
u32 gaht7; /* Reset, R */
u32 gaht8; /* Reset, R */
u32 tpc; /* Reset, T */
} emac4sync;
} u1;
};
/*
* PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
*/
#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
#define EMAC_MR0_TXI 0x40000000
#define EMAC_MR0_SRST 0x20000000
#define EMAC_MR0_TXE 0x10000000
#define EMAC_MR0_RXE 0x08000000
#define EMAC_MR0_WKE 0x04000000
/* EMACx_MR1 */
#define EMAC_MR1_FDE 0x80000000
#define EMAC_MR1_ILE 0x40000000
#define EMAC_MR1_VLE 0x20000000
#define EMAC_MR1_EIFC 0x10000000
#define EMAC_MR1_APP 0x08000000
#define EMAC_MR1_IST 0x01000000
#define EMAC_MR1_MF_MASK 0x00c00000
#define EMAC_MR1_MF_10 0x00000000
#define EMAC_MR1_MF_100 0x00400000
#define EMAC_MR1_MF_1000 0x00800000
#define EMAC_MR1_MF_1000GPCS 0x00c00000
#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6)
#define EMAC_MR1_RFS_4K 0x00300000
#define EMAC_MR1_RFS_16K 0x00000000
#define EMAC_MR1_TFS_2K 0x00080000
#define EMAC_MR1_TR0_MULT 0x00008000
#define EMAC_MR1_JPSM 0x00000000
#define EMAC_MR1_MWSW_001 0x00000000
#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
#define EMAC4_MR1_JPSM 0x00000800
#define EMAC4_MR1_OBCI_MASK 0x00000038
#define EMAC4_MR1_OBCI_50 0x00000000
#define EMAC4_MR1_OBCI_66 0x00000008
#define EMAC4_MR1_OBCI_83 0x00000010
#define EMAC4_MR1_OBCI_100 0x00000018
#define EMAC4_MR1_OBCI_100P 0x00000020
#define EMAC4_MR1_OBCI(freq) ((freq) <= 50 ? EMAC4_MR1_OBCI_50 : \
(freq) <= 66 ? EMAC4_MR1_OBCI_66 : \
(freq) <= 83 ? EMAC4_MR1_OBCI_83 : \
(freq) <= 100 ? EMAC4_MR1_OBCI_100 : \
EMAC4_MR1_OBCI_100P)
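/* e.g. EMAC4_MR1_OBCI(66) selects EMAC4_MR1_OBCI_66 for a 66 MHz OPB clock */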
/* EMACx_TMR0 */
#define EMAC_TMR0_GNP 0x80000000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC4_TMR0_TFAE_2_32 0x00000001
#define EMAC4_TMR0_TFAE_4_64 0x00000002
#define EMAC4_TMR0_TFAE_8_128 0x00000003
#define EMAC4_TMR0_TFAE_16_256 0x00000004
#define EMAC4_TMR0_TFAE_32_512 0x00000005
#define EMAC4_TMR0_TFAE_64_1024 0x00000006
#define EMAC4_TMR0_TFAE_128_2048 0x00000007
#define EMAC4_TMR0_DEFAULT EMAC4_TMR0_TFAE_2_32
#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT)
#define EMAC4_TMR0_XMIT (EMAC_TMR0_GNP | EMAC4_TMR0_DEFAULT)
/* EMACx_TMR1 */
#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
#define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
/* EMACx_RMR */
#define EMAC_RMR_SP 0x80000000
#define EMAC_RMR_SFCS 0x40000000
#define EMAC_RMR_RRP 0x20000000
#define EMAC_RMR_RFP 0x10000000
#define EMAC_RMR_ROP 0x08000000
#define EMAC_RMR_RPIR 0x04000000
#define EMAC_RMR_PPP 0x02000000
#define EMAC_RMR_PME 0x01000000
#define EMAC_RMR_PMME 0x00800000
#define EMAC_RMR_IAE 0x00400000
#define EMAC_RMR_MIAE 0x00200000
#define EMAC_RMR_BAE 0x00100000
#define EMAC_RMR_MAE 0x00080000
#define EMAC_RMR_BASE 0x00000000
#define EMAC4_RMR_RFAF_2_32 0x00000001
#define EMAC4_RMR_RFAF_4_64 0x00000002
#define EMAC4_RMR_RFAF_8_128 0x00000003
#define EMAC4_RMR_RFAF_16_256 0x00000004
#define EMAC4_RMR_RFAF_32_512 0x00000005
#define EMAC4_RMR_RFAF_64_1024 0x00000006
#define EMAC4_RMR_RFAF_128_2048 0x00000007
#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048
/* EMACx_ISR & EMACx_ISER */
#define EMAC4_ISR_TXPE 0x20000000
#define EMAC4_ISR_RXPE 0x10000000
#define EMAC4_ISR_TXUE 0x08000000
#define EMAC4_ISR_RXOE 0x04000000
#define EMAC_ISR_OVR 0x02000000
#define EMAC_ISR_PP 0x01000000
#define EMAC_ISR_BP 0x00800000
#define EMAC_ISR_RP 0x00400000
#define EMAC_ISR_SE 0x00200000
#define EMAC_ISR_ALE 0x00100000
#define EMAC_ISR_BFCS 0x00080000
#define EMAC_ISR_PTLE 0x00040000
#define EMAC_ISR_ORE 0x00020000
#define EMAC_ISR_IRE 0x00010000
#define EMAC_ISR_SQE 0x00000080
#define EMAC_ISR_TE 0x00000040
#define EMAC_ISR_MOS 0x00000002
#define EMAC_ISR_MOF 0x00000001
/* EMACx_STACR */
#define EMAC_STACR_PHYD_MASK 0xffff
#define EMAC_STACR_PHYD_SHIFT 16
#define EMAC_STACR_OC 0x00008000
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
#define EMAC_STACR_STAC_WRITE 0x00002000
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400
#define EMAC_STACR_OPBC_83 0x00000800
#define EMAC_STACR_OPBC_100 0x00000C00
#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \
(freq) <= 66 ? EMAC_STACR_OPBC_66 : \
(freq) <= 83 ? EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100)
#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb)
#define EMAC4_STACR_BASE(opb) 0x00000000
#define EMAC_STACR_PCDA_MASK 0x1f
#define EMAC_STACR_PCDA_SHIFT 5
#define EMAC_STACR_PRA_MASK 0x1f
#define EMACX_STACR_STAC_MASK 0x00003800
#define EMACX_STACR_STAC_READ 0x00001000
#define EMACX_STACR_STAC_WRITE 0x00000800
#define EMACX_STACR_STAC_IND_ADDR 0x00002000
#define EMACX_STACR_STAC_IND_READ 0x00003800
#define EMACX_STACR_STAC_IND_READINC 0x00003000
#define EMACX_STACR_STAC_IND_WRITE 0x00002800
/* EMACx_TRTR */
#define EMAC_TRTR_SHIFT_EMAC4 24
#define EMAC_TRTR_SHIFT 27
/* EMAC specific TX descriptor control fields (write access) */
#define EMAC_TX_CTRL_GFCS 0x0200
#define EMAC_TX_CTRL_GP 0x0100
#define EMAC_TX_CTRL_ISA 0x0080
#define EMAC_TX_CTRL_RSA 0x0040
#define EMAC_TX_CTRL_IVT 0x0020
#define EMAC_TX_CTRL_RVT 0x0010
#define EMAC_TX_CTRL_TAH_CSUM 0x000e
/* EMAC specific TX descriptor status fields (read access) */
#define EMAC_TX_ST_BFCS 0x0200
#define EMAC_TX_ST_LCS 0x0080
#define EMAC_TX_ST_ED 0x0040
#define EMAC_TX_ST_EC 0x0020
#define EMAC_TX_ST_LC 0x0010
#define EMAC_TX_ST_MC 0x0008
#define EMAC_TX_ST_SC 0x0004
#define EMAC_TX_ST_UR 0x0002
#define EMAC_TX_ST_SQE 0x0001
#define EMAC_IS_BAD_TX (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
EMAC_TX_ST_EC | EMAC_TX_ST_LC | \
EMAC_TX_ST_MC | EMAC_TX_ST_UR)
#define EMAC_IS_BAD_TX_TAH (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
EMAC_TX_ST_EC | EMAC_TX_ST_LC)
/* EMAC specific RX descriptor status fields (read access) */
#define EMAC_RX_ST_OE 0x0200
#define EMAC_RX_ST_PP 0x0100
#define EMAC_RX_ST_BP 0x0080
#define EMAC_RX_ST_RP 0x0040
#define EMAC_RX_ST_SE 0x0020
#define EMAC_RX_ST_AE 0x0010
#define EMAC_RX_ST_BFCS 0x0008
#define EMAC_RX_ST_PTL 0x0004
#define EMAC_RX_ST_ORE 0x0002
#define EMAC_RX_ST_IRE 0x0001
#define EMAC_RX_TAH_BAD_CSUM 0x0003
#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \
EMAC_RX_ST_RP | EMAC_RX_ST_SE | \
EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
EMAC_RX_ST_IRE )
#endif /* __IBM_NEWEMAC_H */

View file

@@ -0,0 +1,809 @@
/*
* drivers/net/ethernet/ibm/emac/mal.c
*
* Memory Access Layer (MAL) support
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Benjamin Herrenschmidt <benh@kernel.crashing.org>,
* David Gibson <hermes@gibson.dropbear.id.au>,
*
* Armin Kuster <akuster@mvista.com>
* Copyright 2002 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "core.h"
#include <asm/dcr-regs.h>
static int mal_count;
int __devinit mal_register_commac(struct mal_instance *mal,
struct mal_commac *commac)
{
unsigned long flags;
spin_lock_irqsave(&mal->lock, flags);
MAL_DBG(mal, "reg(%08x, %08x)" NL,
commac->tx_chan_mask, commac->rx_chan_mask);
/* Don't let multiple commacs claim the same channel(s) */
if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
(mal->rx_chan_mask & commac->rx_chan_mask)) {
spin_unlock_irqrestore(&mal->lock, flags);
printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
mal->index);
return -EBUSY;
}
if (list_empty(&mal->list))
napi_enable(&mal->napi);
mal->tx_chan_mask |= commac->tx_chan_mask;
mal->rx_chan_mask |= commac->rx_chan_mask;
list_add(&commac->list, &mal->list);
spin_unlock_irqrestore(&mal->lock, flags);
return 0;
}
void mal_unregister_commac(struct mal_instance *mal,
struct mal_commac *commac)
{
unsigned long flags;
spin_lock_irqsave(&mal->lock, flags);
MAL_DBG(mal, "unreg(%08x, %08x)" NL,
commac->tx_chan_mask, commac->rx_chan_mask);
mal->tx_chan_mask &= ~commac->tx_chan_mask;
mal->rx_chan_mask &= ~commac->rx_chan_mask;
list_del_init(&commac->list);
if (list_empty(&mal->list))
napi_disable(&mal->napi);
spin_unlock_irqrestore(&mal->lock, flags);
}
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
size > MAL_MAX_RX_SIZE);
MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);
if (size & 0xf) {
printk(KERN_WARNING
"mal%d: incorrect RX size %lu for the channel %d\n",
mal->index, size, channel);
return -EINVAL;
}
set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
return 0;
}
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
BUG_ON(channel < 0 || channel >= mal->num_tx_chans);
return channel * NUM_TX_BUFF;
}
int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
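/*
 * BD area layout implied by the two offsets above: all TX rings come
 * first, back to back, followed by all RX rings, e.g. with two channels
 * each: [TX0 | TX1 | RX0 | RX1], NUM_TX_BUFF resp. NUM_RX_BUFF
 * descriptors per ring.
 */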
void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
unsigned long flags;
spin_lock_irqsave(&mal->lock, flags);
MAL_DBG(mal, "enable_tx(%d)" NL, channel);
set_mal_dcrn(mal, MAL_TXCASR,
get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
spin_unlock_irqrestore(&mal->lock, flags);
}
void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}
void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
unsigned long flags;
/*
* On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
* of 8, but enabling in MAL_RXCASR needs the divided by 8 value
* for the bitmask
*/
if (!(channel % 8))
channel >>= 3;
spin_lock_irqsave(&mal->lock, flags);
MAL_DBG(mal, "enable_rx(%d)" NL, channel);
set_mal_dcrn(mal, MAL_RXCASR,
get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
spin_unlock_irqrestore(&mal->lock, flags);
}
void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
/*
* On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
* of 8, but enabling in MAL_RXCASR needs the divided by 8 value
* for the bitmask
*/
if (!(channel % 8))
channel >>= 3;
set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
unsigned long flags;
spin_lock_irqsave(&mal->lock, flags);
MAL_DBG(mal, "poll_add(%p)" NL, commac);
/* starts disabled */
set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
list_add_tail(&commac->poll_list, &mal->poll_list);
spin_unlock_irqrestore(&mal->lock, flags);
}
void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
unsigned long flags;
spin_lock_irqsave(&mal->lock, flags);
MAL_DBG(mal, "poll_del(%p)" NL, commac);
list_del(&commac->poll_list);
spin_unlock_irqrestore(&mal->lock, flags);
}
/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
MAL_DBG2(mal, "enable_irq" NL);
// XXX might want to cache MAL_CFG as the DCR read can be slooooow
set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}
/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
// XXX might want to cache MAL_CFG as the DCR read can be slooooow
set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
MAL_DBG2(mal, "disable_irq" NL);
}
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
u32 esr = get_mal_dcrn(mal, MAL_ESR);
/* Clear the error status register */
set_mal_dcrn(mal, MAL_ESR, esr);
MAL_DBG(mal, "SERR %08x" NL, esr);
if (esr & MAL_ESR_EVB) {
if (esr & MAL_ESR_DE) {
/* We ignore Descriptor error,
* TXDE or RXDE interrupt will be generated anyway.
*/
return IRQ_HANDLED;
}
if (esr & MAL_ESR_PEIN) {
/* PLB error, it's probably buggy hardware or
* incorrect physical address in BD (i.e. bug)
*/
if (net_ratelimit())
printk(KERN_ERR
"mal%d: system error, "
"PLB (ESR = 0x%08x)\n",
mal->index, esr);
return IRQ_HANDLED;
}
/* OPB error, it's probably buggy hardware or incorrect
* EBC setup
*/
if (net_ratelimit())
printk(KERN_ERR
"mal%d: system error, OPB (ESR = 0x%08x)\n",
mal->index, esr);
}
return IRQ_HANDLED;
}
static inline void mal_schedule_poll(struct mal_instance *mal)
{
if (likely(napi_schedule_prep(&mal->napi))) {
MAL_DBG2(mal, "schedule_poll" NL);
mal_disable_eob_irq(mal);
__napi_schedule(&mal->napi);
} else
MAL_DBG2(mal, "already in poll" NL);
}
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
MAL_DBG2(mal, "txeob %08x" NL, r);
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_TXEOBISR, r);
#ifdef CONFIG_PPC_DCR_NATIVE
if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif
return IRQ_HANDLED;
}
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
MAL_DBG2(mal, "rxeob %08x" NL, r);
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_RXEOBISR, r);
#ifdef CONFIG_PPC_DCR_NATIVE
if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif
return IRQ_HANDLED;
}
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
set_mal_dcrn(mal, MAL_TXDEIR, deir);
MAL_DBG(mal, "txde %08x" NL, deir);
if (net_ratelimit())
printk(KERN_ERR
"mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
mal->index, deir);
return IRQ_HANDLED;
}
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
struct list_head *l;
u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
MAL_DBG(mal, "rxde %08x" NL, deir);
list_for_each(l, &mal->list) {
struct mal_commac *mc = list_entry(l, struct mal_commac, list);
if (deir & mc->rx_chan_mask) {
set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
mc->ops->rxde(mc->dev);
}
}
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_RXDEIR, deir);
return IRQ_HANDLED;
}
static irqreturn_t mal_int(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
u32 esr = get_mal_dcrn(mal, MAL_ESR);
if (esr & MAL_ESR_EVB) {
/* descriptor error */
if (esr & MAL_ESR_DE) {
if (esr & MAL_ESR_CIDT)
return mal_rxde(irq, dev_instance);
else
return mal_txde(irq, dev_instance);
} else { /* SERR */
return mal_serr(irq, dev_instance);
}
}
return IRQ_HANDLED;
}
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
/* Spinlock-type semantics: only one caller disables poll at a time */
while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
msleep(1);
/* Synchronize with the MAL NAPI poller */
napi_synchronize(&mal->napi);
}
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
smp_wmb();
clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
/* Feels better to trigger a poll here to catch up with events that
* may have happened on this channel while disabled. It will most
* probably be delayed until the next interrupt but that's mostly a
* non-issue in the context where this is called.
*/
napi_schedule(&mal->napi);
}
static int mal_poll(struct napi_struct *napi, int budget)
{
struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
struct list_head *l;
int received = 0;
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
again:
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
list_entry(l, struct mal_commac, poll_list);
mc->ops->poll_tx(mc->dev);
}
/* Process RX skbs.
*
* We _might_ need something smarter here to enforce polling
* fairness.
*/
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
list_entry(l, struct mal_commac, poll_list);
int n;
if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
continue;
n = mc->ops->poll_rx(mc->dev, budget);
if (n) {
received += n;
budget -= n;
if (budget <= 0)
goto more_work; // XXX What if this is the last one ?
}
}
/* We need to disable IRQs to protect from RXDE IRQ here */
spin_lock_irqsave(&mal->lock, flags);
__napi_complete(napi);
mal_enable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
/* Check for "rotting" packet(s) */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
list_entry(l, struct mal_commac, poll_list);
if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
continue;
if (unlikely(mc->ops->peek_rx(mc->dev) ||
test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
MAL_DBG2(mal, "rotting packet" NL);
if (napi_reschedule(napi))
mal_disable_eob_irq(mal);
else
MAL_DBG2(mal, "already in poll list" NL);
if (budget > 0)
goto again;
else
goto more_work;
}
mc->ops->poll_tx(mc->dev);
}
more_work:
MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
return received;
}
static void mal_reset(struct mal_instance *mal)
{
int n = 10;
MAL_DBG(mal, "reset" NL);
set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
/* Wait for reset to complete (1 system clock) */
while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
--n;
if (unlikely(!n))
printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}
int mal_get_regs_len(struct mal_instance *mal)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct mal_regs);
}
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
struct emac_ethtool_regs_subhdr *hdr = buf;
struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
int i;
hdr->version = mal->version;
hdr->index = mal->index;
regs->tx_count = mal->num_tx_chans;
regs->rx_count = mal->num_rx_chans;
regs->cfg = get_mal_dcrn(mal, MAL_CFG);
regs->esr = get_mal_dcrn(mal, MAL_ESR);
regs->ier = get_mal_dcrn(mal, MAL_IER);
regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);
for (i = 0; i < regs->tx_count; ++i)
regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
for (i = 0; i < regs->rx_count; ++i) {
regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
}
return regs + 1;
}
static int __devinit mal_probe(struct platform_device *ofdev)
{
struct mal_instance *mal;
int err = 0, i, bd_size;
int index = mal_count++;
unsigned int dcr_base;
const u32 *prop;
u32 cfg;
unsigned long irqflags;
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
if (!mal) {
printk(KERN_ERR
"mal%d: out of memory allocating MAL structure!\n",
index);
return -ENOMEM;
}
mal->index = index;
mal->ofdev = ofdev;
mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
MAL_DBG(mal, "probe" NL);
prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
"mal%d: can't find MAL num-tx-chans property!\n",
index);
err = -ENODEV;
goto fail;
}
mal->num_tx_chans = prop[0];
prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
"mal%d: can't find MAL num-rx-chans property!\n",
index);
err = -ENODEV;
goto fail;
}
mal->num_rx_chans = prop[0];
dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
if (dcr_base == 0) {
printk(KERN_ERR
"mal%d: can't find DCR resource!\n", index);
err = -ENODEV;
goto fail;
}
mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
if (!DCR_MAP_OK(mal->dcr_host)) {
printk(KERN_ERR
"mal%d: failed to map DCRs !\n", index);
err = -ENODEV;
goto fail;
}
if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
MAL_FTR_COMMON_ERR_INT);
#else
printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
ofdev->dev.of_node->full_name);
err = -ENODEV;
goto fail;
#endif
}
mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
mal->txde_irq = mal->rxde_irq = mal->serr_irq;
} else {
mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
}
if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
mal->rxde_irq == NO_IRQ) {
printk(KERN_ERR
"mal%d: failed to map interrupts !\n", index);
err = -ENODEV;
goto fail_unmap;
}
INIT_LIST_HEAD(&mal->poll_list);
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
init_dummy_netdev(&mal->dummy_dev);
netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
mal_reset(mal);
/* Set the MAL configuration register */
cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;
/* Current Axon is not happy with priority being non-0, it can
* deadlock, fix it up here
*/
if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);
/* Apply configuration */
set_mal_dcrn(mal, MAL_CFG, cfg);
/* Allocate space for BD rings */
BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
mal->bd_virt =
dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
GFP_KERNEL);
if (mal->bd_virt == NULL) {
printk(KERN_ERR
"mal%d: out of memory allocating RX/TX descriptors!\n",
index);
err = -ENOMEM;
goto fail_unmap;
}
memset(mal->bd_virt, 0, bd_size);
for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
sizeof(struct mal_descriptor) *
mal_tx_bd_offset(mal, i));
for (i = 0; i < mal->num_rx_chans; ++i)
set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
sizeof(struct mal_descriptor) *
mal_rx_bd_offset(mal, i));
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
irqflags = IRQF_SHARED;
hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
} else {
irqflags = 0;
hdlr_serr = mal_serr;
hdlr_txde = mal_txde;
hdlr_rxde = mal_rxde;
}
err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
if (err)
goto fail2;
err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
if (err)
goto fail3;
err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
if (err)
goto fail4;
err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
if (err)
goto fail5;
err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
if (err)
goto fail6;
/* Enable all MAL SERR interrupt sources */
if (mal->version == 2)
set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
else
set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
/* Enable EOB interrupt */
mal_enable_eob_irq(mal);
printk(KERN_INFO
"MAL v%d %s, %d TX channels, %d RX channels\n",
mal->version, ofdev->dev.of_node->full_name,
mal->num_tx_chans, mal->num_rx_chans);
/* Advertise this instance to the rest of the world */
wmb();
dev_set_drvdata(&ofdev->dev, mal);
mal_dbg_register(mal);
return 0;
fail6:
free_irq(mal->rxde_irq, mal);
fail5:
free_irq(mal->txeob_irq, mal);
fail4:
free_irq(mal->txde_irq, mal);
fail3:
free_irq(mal->serr_irq, mal);
fail2:
dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
fail:
kfree(mal);
return err;
}
static int __devexit mal_remove(struct platform_device *ofdev)
{
struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
MAL_DBG(mal, "remove" NL);
/* Synchronize with scheduled polling */
napi_disable(&mal->napi);
if (!list_empty(&mal->list)) {
/* This is *very* bad */
printk(KERN_EMERG
"mal%d: commac list is not empty on remove!\n",
mal->index);
WARN_ON(1);
}
dev_set_drvdata(&ofdev->dev, NULL);
free_irq(mal->serr_irq, mal);
free_irq(mal->txde_irq, mal);
free_irq(mal->txeob_irq, mal);
free_irq(mal->rxde_irq, mal);
free_irq(mal->rxeob_irq, mal);
mal_reset(mal);
mal_dbg_unregister(mal);
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
mal->bd_dma);
kfree(mal);
return 0;
}
static struct of_device_id mal_platform_match[] =
{
{
.compatible = "ibm,mcmal",
},
{
.compatible = "ibm,mcmal2",
},
/* Backward compat */
{
.type = "mcmal-dma",
.compatible = "ibm,mcmal",
},
{
.type = "mcmal-dma",
.compatible = "ibm,mcmal2",
},
{},
};
static struct platform_driver mal_of_driver = {
.driver = {
.name = "mcmal",
.owner = THIS_MODULE,
.of_match_table = mal_platform_match,
},
.probe = mal_probe,
.remove = mal_remove,
};
int __init mal_init(void)
{
return platform_driver_register(&mal_of_driver);
}
void mal_exit(void)
{
platform_driver_unregister(&mal_of_driver);
}

View File

@@ -0,0 +1,316 @@
/*
* drivers/net/ibm_newemac/mal.h
*
* Memory Access Layer (MAL) support
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Armin Kuster <akuster@mvista.com>
* Copyright 2002 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __IBM_NEWEMAC_MAL_H
#define __IBM_NEWEMAC_MAL_H
/*
* There are some variations on the MAL, we express them in this driver as
* MAL Version 1 and 2 though that doesn't match any IBM terminology.
*
* We call MAL 1 the version in 405GP, 405GPR, 405EP, 440EP, 440GR and
* NP405H.
*
* We call MAL 2 the version in 440GP, 440GX, 440SP, 440SPE and Axon
*
* The driver expects a "version" property in the emac node containing
* a number 1 or 2. New device-trees for EMAC capable platforms are thus
* required to include that when porting to arch/powerpc.
*/
/* MALx DCR registers */
#define MAL_CFG 0x00
#define MAL_CFG_SR 0x80000000
#define MAL_CFG_PLBB 0x00004000
#define MAL_CFG_OPBBL 0x00000080
#define MAL_CFG_EOPIE 0x00000004
#define MAL_CFG_LEA 0x00000002
#define MAL_CFG_SD 0x00000001
/* MAL V1 CFG bits */
#define MAL1_CFG_PLBP_MASK 0x00c00000
#define MAL1_CFG_PLBP_10 0x00800000
#define MAL1_CFG_GA 0x00200000
#define MAL1_CFG_OA 0x00100000
#define MAL1_CFG_PLBLE 0x00080000
#define MAL1_CFG_PLBT_MASK 0x00078000
#define MAL1_CFG_DEFAULT (MAL1_CFG_PLBP_10 | MAL1_CFG_PLBT_MASK)
/* MAL V2 CFG bits */
#define MAL2_CFG_RPP_MASK 0x00c00000
#define MAL2_CFG_RPP_10 0x00800000
#define MAL2_CFG_RMBS_MASK 0x00300000
#define MAL2_CFG_WPP_MASK 0x000c0000
#define MAL2_CFG_WPP_10 0x00080000
#define MAL2_CFG_WMBS_MASK 0x00030000
#define MAL2_CFG_PLBLE 0x00008000
#define MAL2_CFG_DEFAULT (MAL2_CFG_RMBS_MASK | MAL2_CFG_WMBS_MASK | \
MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10)
#define MAL_ESR 0x01
#define MAL_ESR_EVB 0x80000000
#define MAL_ESR_CIDT 0x40000000
#define MAL_ESR_CID_MASK 0x3e000000
#define MAL_ESR_CID_SHIFT 25
#define MAL_ESR_DE 0x00100000
#define MAL_ESR_OTE 0x00040000
#define MAL_ESR_OSE 0x00020000
#define MAL_ESR_PEIN 0x00010000
#define MAL_ESR_DEI 0x00000010
#define MAL_ESR_OTEI 0x00000004
#define MAL_ESR_OSEI 0x00000002
#define MAL_ESR_PBEI 0x00000001
/* MAL V1 ESR bits */
#define MAL1_ESR_ONE 0x00080000
#define MAL1_ESR_ONEI 0x00000008
/* MAL V2 ESR bits */
#define MAL2_ESR_PTE 0x00800000
#define MAL2_ESR_PRE 0x00400000
#define MAL2_ESR_PWE 0x00200000
#define MAL2_ESR_PTEI 0x00000080
#define MAL2_ESR_PREI 0x00000040
#define MAL2_ESR_PWEI 0x00000020
#define MAL_IER 0x02
#define MAL_IER_DE 0x00000010
#define MAL_IER_OTE 0x00000004
#define MAL_IER_OE 0x00000002
#define MAL_IER_PE 0x00000001
/* MAL V1 IER bits */
#define MAL1_IER_NWE 0x00000008
#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
/* MAL V2 IER bits */
#define MAL2_IER_PT 0x00000080
#define MAL2_IER_PRE 0x00000040
#define MAL2_IER_PWE 0x00000020
#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
#define MAL_TXCASR 0x04
#define MAL_TXCARR 0x05
#define MAL_TXEOBISR 0x06
#define MAL_TXDEIR 0x07
#define MAL_RXCASR 0x10
#define MAL_RXCARR 0x11
#define MAL_RXEOBISR 0x12
#define MAL_RXDEIR 0x13
#define MAL_TXCTPR(n) ((n) + 0x20)
#define MAL_RXCTPR(n) ((n) + 0x40)
#define MAL_RCBS(n) ((n) + 0x60)
/* In reality MAL can handle TX buffers up to 4095 bytes long,
* but this isn't a good round number :) --ebs
*/
#define MAL_MAX_TX_SIZE 4080
#define MAL_MAX_RX_SIZE 4080
static inline int mal_rx_size(int len)
{
len = (len + 0xf) & ~0xf;
return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len;
}
static inline int mal_tx_chunks(int len)
{
return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
}
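/* Worked examples (editorial illustration, not part of the original
 * header): mal_rx_size(1514) rounds the buffer up to the next 16-byte
 * boundary, (1514 + 0xf) & ~0xf = 1520, while a 9000-byte jumbo frame
 * needs mal_tx_chunks(9000) = (9000 + 4079) / 4080 = 3 TX descriptors.
 */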
#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
/* MAL Buffer Descriptor structure */
struct mal_descriptor {
u16 ctrl; /* MAL / Commac status control bits */
u16 data_len; /* Max length is 4K-1 (12 bits) */
u32 data_ptr; /* pointer to actual data buffer */
};
/* the following defines are for the MadMAL status and control registers. */
/* MADMAL transmit and receive status/control bits */
#define MAL_RX_CTRL_EMPTY 0x8000
#define MAL_RX_CTRL_WRAP 0x4000
#define MAL_RX_CTRL_CM 0x2000
#define MAL_RX_CTRL_LAST 0x1000
#define MAL_RX_CTRL_FIRST 0x0800
#define MAL_RX_CTRL_INTR 0x0400
#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST)
#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE)
#define MAL_TX_CTRL_READY 0x8000
#define MAL_TX_CTRL_WRAP 0x4000
#define MAL_TX_CTRL_CM 0x2000
#define MAL_TX_CTRL_LAST 0x1000
#define MAL_TX_CTRL_INTR 0x0400
struct mal_commac_ops {
void (*poll_tx) (void *dev);
int (*poll_rx) (void *dev, int budget);
int (*peek_rx) (void *dev);
void (*rxde) (void *dev);
};
struct mal_commac {
struct mal_commac_ops *ops;
void *dev;
struct list_head poll_list;
long flags;
#define MAL_COMMAC_RX_STOPPED 0
#define MAL_COMMAC_POLL_DISABLED 1
u32 tx_chan_mask;
u32 rx_chan_mask;
struct list_head list;
};
struct mal_instance {
int version;
dcr_host_t dcr_host;
int num_tx_chans; /* Number of TX channels */
int num_rx_chans; /* Number of RX channels */
int txeob_irq; /* TX End Of Buffer IRQ */
int rxeob_irq; /* RX End Of Buffer IRQ */
int txde_irq; /* TX Descriptor Error IRQ */
int rxde_irq; /* RX Descriptor Error IRQ */
int serr_irq; /* MAL System Error IRQ */
struct list_head poll_list;
struct napi_struct napi;
struct list_head list;
u32 tx_chan_mask;
u32 rx_chan_mask;
dma_addr_t bd_dma;
struct mal_descriptor *bd_virt;
struct platform_device *ofdev;
int index;
spinlock_t lock;
struct net_device dummy_dev;
unsigned int features;
};
static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg)
{
return dcr_read(mal->dcr_host, reg);
}
static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val)
{
dcr_write(mal->dcr_host, reg, val);
}
/* Features of various MAL implementations */
/* Set if you have interrupt coalescing and you have to clear the SDR
* register for TXEOB and RXEOB interrupts to work
*/
#define MAL_FTR_CLEAR_ICINTSTAT 0x00000001
/* Set if your MAL has SERR, TXDE, and RXDE OR'd into a single UIC
* interrupt
*/
#define MAL_FTR_COMMON_ERR_INT 0x00000002
enum {
MAL_FTRS_ALWAYS = 0,
MAL_FTRS_POSSIBLE =
#ifdef CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT
MAL_FTR_CLEAR_ICINTSTAT |
#endif
#ifdef CONFIG_IBM_EMAC_MAL_COMMON_ERR
MAL_FTR_COMMON_ERR_INT |
#endif
0,
};
static inline int mal_has_feature(struct mal_instance *dev,
unsigned long feature)
{
return (MAL_FTRS_ALWAYS & feature) ||
(MAL_FTRS_POSSIBLE & dev->features & feature);
}
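/* Editorial note: because MAL_FTRS_POSSIBLE is a compile-time constant,
 * mal_has_feature() collapses to 0 for any feature whose Kconfig option
 * is disabled; e.g. without CONFIG_IBM_EMAC_MAL_COMMON_ERR the
 * MAL_FTR_COMMON_ERR_INT branches in mal.c are optimized away entirely.
 */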
/* Register MAL devices */
int mal_init(void);
void mal_exit(void);
int mal_register_commac(struct mal_instance *mal,
struct mal_commac *commac);
void mal_unregister_commac(struct mal_instance *mal,
struct mal_commac *commac);
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size);
/* Returns BD ring offset for a particular channel
(in 'struct mal_descriptor' elements)
*/
int mal_tx_bd_offset(struct mal_instance *mal, int channel);
int mal_rx_bd_offset(struct mal_instance *mal, int channel);
void mal_enable_tx_channel(struct mal_instance *mal, int channel);
void mal_disable_tx_channel(struct mal_instance *mal, int channel);
void mal_enable_rx_channel(struct mal_instance *mal, int channel);
void mal_disable_rx_channel(struct mal_instance *mal, int channel);
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac);
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac);
/* Add/remove EMAC to/from MAL polling list */
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac);
void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac);
/* Ethtool MAL registers */
struct mal_regs {
u32 tx_count;
u32 rx_count;
u32 cfg;
u32 esr;
u32 ier;
u32 tx_casr;
u32 tx_carr;
u32 tx_eobisr;
u32 tx_deir;
u32 rx_casr;
u32 rx_carr;
u32 rx_eobisr;
u32 rx_deir;
u32 tx_ctpr[32];
u32 rx_ctpr[32];
u32 rcbs[32];
};
int mal_get_regs_len(struct mal_instance *mal);
void *mal_dump_regs(struct mal_instance *mal, void *buf);
#endif /* __IBM_NEWEMAC_MAL_H */

View File

@@ -0,0 +1,541 @@
/*
* drivers/net/ibm_newemac/phy.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
* Borrowed from sungem_phy.c, though I only kept the generic MII
* driver for now.
*
* This file should be shared with other drivers or eventually
* merged as the "low level" part of miilib
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
* (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "emac.h"
#include "phy.h"
#define phy_read _phy_read
#define phy_write _phy_write
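/* Editorial note: the renames above keep these local accessors from
 * clashing with the phy_read()/phy_write() symbols declared by the
 * kernel's phylib; the helpers below go through the EMAC's own MDIO
 * callbacks instead.
 */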
static inline int _phy_read(struct mii_phy *phy, int reg)
{
return phy->mdio_read(phy->dev, phy->address, reg);
}
static inline void _phy_write(struct mii_phy *phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->address, reg, val);
}
static inline int gpcs_phy_read(struct mii_phy *phy, int reg)
{
return phy->mdio_read(phy->dev, phy->gpcs_address, reg);
}
static inline void gpcs_phy_write(struct mii_phy *phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->gpcs_address, reg, val);
}
int emac_mii_reset_phy(struct mii_phy *phy)
{
int val;
int limit = 10000;
val = phy_read(phy, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
val |= BMCR_RESET;
phy_write(phy, MII_BMCR, val);
udelay(300);
while (--limit) {
val = phy_read(phy, MII_BMCR);
if (val >= 0 && (val & BMCR_RESET) == 0)
break;
udelay(10);
}
if ((val & BMCR_ISOLATE) && limit > 0)
phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
return limit <= 0;
}
int emac_mii_reset_gpcs(struct mii_phy *phy)
{
int val;
int limit = 10000;
val = gpcs_phy_read(phy, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
val |= BMCR_RESET;
gpcs_phy_write(phy, MII_BMCR, val);
udelay(300);
while (--limit) {
val = gpcs_phy_read(phy, MII_BMCR);
if (val >= 0 && (val & BMCR_RESET) == 0)
break;
udelay(10);
}
if ((val & BMCR_ISOLATE) && limit > 0)
gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
/* Configure GPCS interface to recommended setting for SGMII */
gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
gpcs_phy_write(phy, 0x00, 0x0140); /* 1Gbps, FDX */
}
return limit <= 0;
}
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
{
int ctl, adv;
phy->autoneg = AUTONEG_ENABLE;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = phy->asym_pause = 0;
phy->advertising = advertise;
ctl = phy_read(phy, MII_BMCR);
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
/* First clear the PHY */
phy_write(phy, MII_BMCR, ctl);
/* Setup standard advertise */
adv = phy_read(phy, MII_ADVERTISE);
if (adv < 0)
return adv;
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
if (advertise & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (advertise & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (advertise & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertise & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (advertise & ADVERTISED_Pause)
adv |= ADVERTISE_PAUSE_CAP;
if (advertise & ADVERTISED_Asym_Pause)
adv |= ADVERTISE_PAUSE_ASYM;
phy_write(phy, MII_ADVERTISE, adv);
if (phy->features &
(SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
adv = phy_read(phy, MII_CTRL1000);
if (adv < 0)
return adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
if (advertise & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
if (advertise & ADVERTISED_1000baseT_Half)
adv |= ADVERTISE_1000HALF;
phy_write(phy, MII_CTRL1000, adv);
}
/* Start/Restart aneg */
ctl = phy_read(phy, MII_BMCR);
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
{
int ctl;
phy->autoneg = AUTONEG_DISABLE;
phy->speed = speed;
phy->duplex = fd;
phy->pause = phy->asym_pause = 0;
ctl = phy_read(phy, MII_BMCR);
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
/* First clear the PHY */
phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
/* Select speed & duplex */
switch (speed) {
case SPEED_10:
break;
case SPEED_100:
ctl |= BMCR_SPEED100;
break;
case SPEED_1000:
ctl |= BMCR_SPEED1000;
break;
default:
return -EINVAL;
}
if (fd == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int genmii_poll_link(struct mii_phy *phy)
{
int status;
/* Clear latched value with dummy read */
phy_read(phy, MII_BMSR);
status = phy_read(phy, MII_BMSR);
if (status < 0 || (status & BMSR_LSTATUS) == 0)
return 0;
if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
return 0;
return 1;
}
static int genmii_read_link(struct mii_phy *phy)
{
if (phy->autoneg == AUTONEG_ENABLE) {
int glpa = 0;
int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
if (lpa < 0)
return lpa;
if (phy->features &
(SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
int adv = phy_read(phy, MII_CTRL1000);
glpa = phy_read(phy, MII_STAT1000);
if (glpa < 0 || adv < 0)
return (glpa < 0) ? glpa : adv;
glpa &= adv << 2;
}
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = phy->asym_pause = 0;
if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
phy->speed = SPEED_1000;
if (glpa & LPA_1000FULL)
phy->duplex = DUPLEX_FULL;
} else if (lpa & (LPA_100FULL | LPA_100HALF)) {
phy->speed = SPEED_100;
if (lpa & LPA_100FULL)
phy->duplex = DUPLEX_FULL;
} else if (lpa & LPA_10FULL)
phy->duplex = DUPLEX_FULL;
if (phy->duplex == DUPLEX_FULL) {
phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
}
} else {
int bmcr = phy_read(phy, MII_BMCR);
if (bmcr < 0)
return bmcr;
if (bmcr & BMCR_FULLDPLX)
phy->duplex = DUPLEX_FULL;
else
phy->duplex = DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
phy->speed = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
phy->speed = SPEED_100;
else
phy->speed = SPEED_10;
phy->pause = phy->asym_pause = 0;
}
return 0;
}
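/* Worked example (editorial): advertising 0x05e1 (10/100 full/half plus
 * pause) against an MII_LPA readout of 0x45e1 gives lpa = 0x05e1;
 * LPA_100FULL is set, so the link resolves to 100 Mbit full duplex with
 * pause enabled.
 */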
/* Generic implementation for most 10/100/1000 PHYs */
static struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
.ops = &generic_phy_ops
};
/* CIS8201 */
#define MII_CIS8201_10BTCSR 0x16
#define TENBTCSR_ECHO_DISABLE 0x2000
#define MII_CIS8201_EPCR 0x17
#define EPCR_MODE_MASK 0x3000
#define EPCR_GMII_MODE 0x0000
#define EPCR_RGMII_MODE 0x1000
#define EPCR_TBI_MODE 0x2000
#define EPCR_RTBI_MODE 0x3000
#define MII_CIS8201_ACSR 0x1c
#define ACSR_PIN_PRIO_SELECT 0x0004
static int cis8201_init(struct mii_phy *phy)
{
int epcr;
epcr = phy_read(phy, MII_CIS8201_EPCR);
if (epcr < 0)
return epcr;
epcr &= ~EPCR_MODE_MASK;
switch (phy->mode) {
case PHY_MODE_TBI:
epcr |= EPCR_TBI_MODE;
break;
case PHY_MODE_RTBI:
epcr |= EPCR_RTBI_MODE;
break;
case PHY_MODE_GMII:
epcr |= EPCR_GMII_MODE;
break;
case PHY_MODE_RGMII:
default:
epcr |= EPCR_RGMII_MODE;
}
phy_write(phy, MII_CIS8201_EPCR, epcr);
/* MII regs override strap pins */
phy_write(phy, MII_CIS8201_ACSR,
phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT);
/* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */
phy_write(phy, MII_CIS8201_10BTCSR,
phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE);
return 0;
}
static struct mii_phy_ops cis8201_phy_ops = {
.init = cis8201_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def cis8201_phy_def = {
.phy_id = 0x000fc410,
.phy_id_mask = 0x000ffff0,
.name = "CIS8201 Gigabit Ethernet",
.ops = &cis8201_phy_ops
};
static struct mii_phy_def bcm5248_phy_def = {
.phy_id = 0x0143bc00,
.phy_id_mask = 0x0ffffff0,
.name = "BCM5248 10/100 SMII Ethernet",
.ops = &generic_phy_ops
};
static int m88e1111_init(struct mii_phy *phy)
{
pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
phy_write(phy, 0x14, 0x0ce3);
phy_write(phy, 0x18, 0x4101);
phy_write(phy, 0x09, 0x0e00);
phy_write(phy, 0x04, 0x01e1);
phy_write(phy, 0x00, 0x9140);
phy_write(phy, 0x00, 0x1140);
return 0;
}
static int m88e1112_init(struct mii_phy *phy)
{
/*
* Marvell 88E1112 PHY needs to have the SGMII MAC
* interface (page 2) properly configured to
* communicate with the 460EX/GT GPCS interface.
*/
u16 reg_short;
pr_debug("%s: Marvell 88E1112 Ethernet\n", __func__);
/* Set access to Page 2 */
phy_write(phy, 0x16, 0x0002);
phy_write(phy, 0x00, 0x0040); /* 1Gbps */
reg_short = (u16)(phy_read(phy, 0x1a));
reg_short |= 0x8000; /* bypass Auto-Negotiation */
phy_write(phy, 0x1a, reg_short);
emac_mii_reset_phy(phy); /* reset MAC interface */
/* Reset access to Page 0 */
phy_write(phy, 0x16, 0x0000);
return 0;
}
static int et1011c_init(struct mii_phy *phy)
{
u16 reg_short;
reg_short = (u16)(phy_read(phy, 0x16));
reg_short &= ~(0x7);
reg_short |= 0x6; /* RGMII Trace Delay*/
phy_write(phy, 0x16, reg_short);
reg_short = (u16)(phy_read(phy, 0x17));
reg_short &= ~(0x40);
phy_write(phy, 0x17, reg_short);
phy_write(phy, 0x1c, 0x74f0);
return 0;
}
static struct mii_phy_ops et1011c_phy_ops = {
.init = et1011c_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def et1011c_phy_def = {
.phy_id = 0x0282f000,
.phy_id_mask = 0x0fffff00,
.name = "ET1011C Gigabit Ethernet",
.ops = &et1011c_phy_ops
};
static struct mii_phy_ops m88e1111_phy_ops = {
.init = m88e1111_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def m88e1111_phy_def = {
.phy_id = 0x01410CC0,
.phy_id_mask = 0x0ffffff0,
.name = "Marvell 88E1111 Ethernet",
.ops = &m88e1111_phy_ops,
};
static struct mii_phy_ops m88e1112_phy_ops = {
.init = m88e1112_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def m88e1112_phy_def = {
.phy_id = 0x01410C90,
.phy_id_mask = 0x0ffffff0,
.name = "Marvell 88E1112 Ethernet",
.ops = &m88e1112_phy_ops,
};
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
&m88e1111_phy_def,
&m88e1112_phy_def,
&genmii_phy_def,
NULL
};
int emac_mii_phy_probe(struct mii_phy *phy, int address)
{
struct mii_phy_def *def;
int i;
u32 id;
phy->autoneg = AUTONEG_DISABLE;
phy->advertising = 0;
phy->address = address;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = phy->asym_pause = 0;
/* Take PHY out of isolate mode and reset it. */
if (emac_mii_reset_phy(phy))
return -ENODEV;
/* Read ID and find matching entry */
id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
if ((id & def->phy_id_mask) == def->phy_id)
break;
/* Should never be NULL (we have a generic entry), but... */
if (!def)
return -ENODEV;
phy->def = def;
/* Determine PHY features if needed */
phy->features = def->features;
if (!phy->features) {
u16 bmsr = phy_read(phy, MII_BMSR);
if (bmsr & BMSR_ANEGCAPABLE)
phy->features |= SUPPORTED_Autoneg;
if (bmsr & BMSR_10HALF)
phy->features |= SUPPORTED_10baseT_Half;
if (bmsr & BMSR_10FULL)
phy->features |= SUPPORTED_10baseT_Full;
if (bmsr & BMSR_100HALF)
phy->features |= SUPPORTED_100baseT_Half;
if (bmsr & BMSR_100FULL)
phy->features |= SUPPORTED_100baseT_Full;
if (bmsr & BMSR_ESTATEN) {
u16 esr = phy_read(phy, MII_ESTATUS);
if (esr & ESTATUS_1000_TFULL)
phy->features |= SUPPORTED_1000baseT_Full;
if (esr & ESTATUS_1000_THALF)
phy->features |= SUPPORTED_1000baseT_Half;
}
phy->features |= SUPPORTED_MII;
}
/* Setup default advertising */
phy->advertising = phy->features;
return 0;
}
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,87 @@
/*
* drivers/net/ibm_newemac/phy.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, PHY support
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
* February 2003
*
* Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This file basically duplicates sungem_phy.{c,h} with different PHYs
* supported. I'm looking into merging that in a single mii layer more
* flexible than mii.c
*/
#ifndef __IBM_NEWEMAC_PHY_H
#define __IBM_NEWEMAC_PHY_H
struct mii_phy;
/* Operations supported by any kind of PHY */
struct mii_phy_ops {
int (*init) (struct mii_phy * phy);
int (*suspend) (struct mii_phy * phy, int wol_options);
int (*setup_aneg) (struct mii_phy * phy, u32 advertise);
int (*setup_forced) (struct mii_phy * phy, int speed, int fd);
int (*poll_link) (struct mii_phy * phy);
int (*read_link) (struct mii_phy * phy);
};
/* Structure used to statically define an mii/gii based PHY */
struct mii_phy_def {
u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
u32 phy_id_mask; /* Significant bits */
u32 features; /* Ethtool SUPPORTED_* defines or
0 for autodetect */
int magic_aneg; /* Autoneg does all speed test for us */
const char *name;
const struct mii_phy_ops *ops;
};
/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy {
struct mii_phy_def *def;
u32 advertising; /* Ethtool ADVERTISED_* defines */
u32 features; /* Copied from mii_phy_def.features
or determined automatically */
int address; /* PHY address */
int mode; /* PHY mode */
int gpcs_address; /* GPCS PHY address */
/* 1: autoneg enabled, 0: disabled */
int autoneg;
/* forced speed & duplex (no autoneg)
* partner speed & duplex & pause (autoneg)
*/
int speed;
int duplex;
int pause;
int asym_pause;
/* Provided by host chip */
struct net_device *dev;
int (*mdio_read) (struct net_device * dev, int addr, int reg);
void (*mdio_write) (struct net_device * dev, int addr, int reg,
int val);
};
/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
* filled, the remaining fields will be filled on return
*/
int emac_mii_phy_probe(struct mii_phy *phy, int address);
int emac_mii_reset_phy(struct mii_phy *phy);
int emac_mii_reset_gpcs(struct mii_phy *phy);
#endif /* __IBM_NEWEMAC_PHY_H */

View File

@@ -0,0 +1,338 @@
/*
* drivers/net/ibm_newemac/rgmii.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Matt Porter <mporter@kernel.crashing.org>
* Copyright 2004 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <asm/io.h>
#include "emac.h"
#include "debug.h"
// XXX FIXME: Axon seems to support a subset of the RGMII, we
// thus need to take that into account and possibly change some
// of the bit settings below that don't seem to quite match the
// AXON spec
/* RGMIIx_FER */
#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4))
#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4))
#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
#define RGMII_FER_MII(idx) RGMII_FER_GMII(idx)
/* RGMIIx_SSR */
#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
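/* Editorial illustration: each RGMII input owns one FER nibble counted
 * up from bit 0 and one SSR byte, so for input 1 in RGMII mode
 * RGMII_FER_RGMII(1) = 0x5 << 4 = 0x00000050, and a gigabit link sets
 * RGMII_SSR_1000(1) = 0x4 << 8 = 0x00000400.
 */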
/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
static inline int rgmii_valid_mode(int phy_mode)
{
return phy_mode == PHY_MODE_GMII ||
phy_mode == PHY_MODE_MII ||
phy_mode == PHY_MODE_RGMII ||
phy_mode == PHY_MODE_TBI ||
phy_mode == PHY_MODE_RTBI;
}
static inline const char *rgmii_mode_name(int mode)
{
switch (mode) {
case PHY_MODE_RGMII:
return "RGMII";
case PHY_MODE_TBI:
return "TBI";
case PHY_MODE_GMII:
return "GMII";
case PHY_MODE_MII:
return "MII";
case PHY_MODE_RTBI:
return "RTBI";
default:
BUG();
}
}
static inline u32 rgmii_mode_mask(int mode, int input)
{
switch (mode) {
case PHY_MODE_RGMII:
return RGMII_FER_RGMII(input);
case PHY_MODE_TBI:
return RGMII_FER_TBI(input);
case PHY_MODE_GMII:
return RGMII_FER_GMII(input);
case PHY_MODE_MII:
return RGMII_FER_MII(input);
case PHY_MODE_RTBI:
return RGMII_FER_RTBI(input);
default:
BUG();
}
}
int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
RGMII_DBG(dev, "attach(%d)" NL, input);
/* Check if we need to attach to a RGMII */
if (input < 0 || !rgmii_valid_mode(mode)) {
printk(KERN_ERR "%s: unsupported settings !\n",
ofdev->dev.of_node->full_name);
return -ENODEV;
}
mutex_lock(&dev->lock);
/* Enable this input */
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%s: input %d in %s mode\n",
ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));
++dev->users;
mutex_unlock(&dev->lock);
return 0;
}
void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
u32 ssr;
mutex_lock(&dev->lock);
ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input);
RGMII_DBG(dev, "speed(%d, %d)" NL, input, speed);
if (speed == SPEED_1000)
ssr |= RGMII_SSR_1000(input);
else if (speed == SPEED_100)
ssr |= RGMII_SSR_100(input);
out_be32(&p->ssr, ssr);
mutex_unlock(&dev->lock);
}
void rgmii_get_mdio(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
u32 fer;
RGMII_DBG2(dev, "get_mdio(%d)" NL, input);
if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO))
return;
mutex_lock(&dev->lock);
fer = in_be32(&p->fer);
fer |= 0x00080000u >> input;
out_be32(&p->fer, fer);
(void)in_be32(&p->fer);
DBG2(dev, " fer = 0x%08x\n", fer);
}
void rgmii_put_mdio(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
u32 fer;
RGMII_DBG2(dev, "put_mdio(%d)" NL, input);
if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO))
return;
fer = in_be32(&p->fer);
fer &= ~(0x00080000u >> input);
out_be32(&p->fer, fer);
(void)in_be32(&p->fer);
DBG2(dev, " fer = 0x%08x\n", fer);
mutex_unlock(&dev->lock);
}
void rgmii_detach(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p;
BUG_ON(!dev || dev->users == 0);
p = dev->base;
mutex_lock(&dev->lock);
RGMII_DBG(dev, "detach(%d)" NL, input);
/* Disable this input */
out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input));
--dev->users;
mutex_unlock(&dev->lock);
}
int rgmii_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct rgmii_regs);
}
void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
hdr->version = 0;
hdr->index = 0; /* for now, are there chips with more than one
* rgmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
return regs + 1;
}
static int __devinit rgmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
struct resource regs;
int rc;
rc = -ENOMEM;
dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
if (dev == NULL) {
printk(KERN_ERR "%s: could not allocate RGMII device!\n",
np->full_name);
goto err_gone;
}
mutex_init(&dev->lock);
dev->ofdev = ofdev;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {
printk(KERN_ERR "%s: Can't get registers address\n",
np->full_name);
goto err_free;
}
rc = -ENOMEM;
dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
sizeof(struct rgmii_regs));
if (dev->base == NULL) {
printk(KERN_ERR "%s: Can't map device registers!\n",
np->full_name);
goto err_free;
}
/* Check for RGMII flags */
if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
/* CAB lacks the right properties, fix this up */
if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
in_be32(&dev->base->fer), in_be32(&dev->base->ssr));
/* Disable all inputs by default */
out_be32(&dev->base->fer, 0);
printk(KERN_INFO
"RGMII %s initialized with%s MDIO support\n",
ofdev->dev.of_node->full_name,
(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
wmb();
dev_set_drvdata(&ofdev->dev, dev);
return 0;
err_free:
kfree(dev);
err_gone:
return rc;
}
static int __devexit rgmii_remove(struct platform_device *ofdev)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
dev_set_drvdata(&ofdev->dev, NULL);
WARN_ON(dev->users != 0);
iounmap(dev->base);
kfree(dev);
return 0;
}
static struct of_device_id rgmii_match[] =
{
{
.compatible = "ibm,rgmii",
},
{
.type = "emac-rgmii",
},
{},
};
static struct platform_driver rgmii_driver = {
.driver = {
.name = "emac-rgmii",
.owner = THIS_MODULE,
.of_match_table = rgmii_match,
},
.probe = rgmii_probe,
.remove = rgmii_remove,
};
int __init rgmii_init(void)
{
return platform_driver_register(&rgmii_driver);
}
void rgmii_exit(void)
{
platform_driver_unregister(&rgmii_driver);
}

View File

@@ -0,0 +1,82 @@
/*
* drivers/net/ibm_newemac/rgmii.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Based on ocp_zmii.h/ibm_emac_zmii.h
* Armin Kuster akuster@mvista.com
*
* Copyright 2004 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __IBM_NEWEMAC_RGMII_H
#define __IBM_NEWEMAC_RGMII_H
/* RGMII bridge type */
#define RGMII_STANDARD 0
#define RGMII_AXON 1
/* RGMII bridge */
struct rgmii_regs {
u32 fer; /* Function enable register */
u32 ssr; /* Speed select register */
};
/* RGMII device */
struct rgmii_instance {
struct rgmii_regs __iomem *base;
/* RGMII bridge flags */
int flags;
#define EMAC_RGMII_FLAG_HAS_MDIO 0x00000001
/* Only one EMAC whacks us at a time */
struct mutex lock;
/* number of EMACs using this RGMII bridge */
int users;
/* OF device instance */
struct platform_device *ofdev;
};
#ifdef CONFIG_IBM_EMAC_RGMII
extern int rgmii_init(void);
extern void rgmii_exit(void);
extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
extern void rgmii_detach(struct platform_device *ofdev, int input);
extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
extern int rgmii_get_regs_len(struct platform_device *ofdev);
extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define rgmii_init() 0
# define rgmii_exit() do { } while(0)
# define rgmii_attach(x,y,z) (-ENXIO)
# define rgmii_detach(x,y) do { } while(0)
# define rgmii_get_mdio(o,i) do { } while (0)
# define rgmii_put_mdio(o,i) do { } while (0)
# define rgmii_set_speed(x,y,z) do { } while(0)
# define rgmii_get_regs_len(x) 0
# define rgmii_dump_regs(x,buf) (buf)
#endif /* !CONFIG_IBM_EMAC_RGMII */
#endif /* __IBM_NEWEMAC_RGMII_H */

View File

@@ -0,0 +1,185 @@
/*
* drivers/net/ibm_newemac/tah.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright 2004 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/io.h>
#include "emac.h"
#include "core.h"
int __devinit tah_attach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
mutex_lock(&dev->lock);
/* Reset has been done at probe() time... nothing else to do for now */
++dev->users;
mutex_unlock(&dev->lock);
return 0;
}
void tah_detach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
mutex_lock(&dev->lock);
--dev->users;
mutex_unlock(&dev->lock);
}
void tah_reset(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct tah_regs __iomem *p = dev->base;
int n;
/* Reset TAH */
out_be32(&p->mr, TAH_MR_SR);
n = 100;
while ((in_be32(&p->mr) & TAH_MR_SR) && n)
--n;
if (unlikely(!n))
printk(KERN_ERR "%s: reset timeout\n",
ofdev->dev.of_node->full_name);
/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
out_be32(&p->mr,
TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
TAH_MR_DIG);
}
int tah_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct tah_regs);
}
void *tah_dump_regs(struct platform_device *ofdev, void *buf)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
hdr->version = 0;
hdr->index = 0; /* for now, are there chips with more than one
* tah ? if yes, then we'll add a cell_index
* like we do for emac
*/
memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
return regs + 1;
}
static int __devinit tah_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
struct resource regs;
int rc;
rc = -ENOMEM;
dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
if (dev == NULL) {
printk(KERN_ERR "%s: could not allocate TAH device!\n",
np->full_name);
goto err_gone;
}
mutex_init(&dev->lock);
dev->ofdev = ofdev;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {
printk(KERN_ERR "%s: Can't get registers address\n",
np->full_name);
goto err_free;
}
rc = -ENOMEM;
dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
sizeof(struct tah_regs));
if (dev->base == NULL) {
printk(KERN_ERR "%s: Can't map device registers!\n",
np->full_name);
goto err_free;
}
dev_set_drvdata(&ofdev->dev, dev);
/* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
tah_reset(ofdev);
printk(KERN_INFO
"TAH %s initialized\n", ofdev->dev.of_node->full_name);
wmb();
return 0;
err_free:
kfree(dev);
err_gone:
return rc;
}
static int __devexit tah_remove(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
dev_set_drvdata(&ofdev->dev, NULL);
WARN_ON(dev->users != 0);
iounmap(dev->base);
kfree(dev);
return 0;
}
static struct of_device_id tah_match[] =
{
{
.compatible = "ibm,tah",
},
/* For backward compat with old DT */
{
.type = "tah",
},
{},
};
static struct platform_driver tah_driver = {
.driver = {
.name = "emac-tah",
.owner = THIS_MODULE,
.of_match_table = tah_match,
},
.probe = tah_probe,
.remove = tah_remove,
};
int __init tah_init(void)
{
return platform_driver_register(&tah_driver);
}
void tah_exit(void)
{
platform_driver_unregister(&tah_driver);
}

View File

@@ -0,0 +1,95 @@
/*
* drivers/net/ibm_newemac/tah.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright 2004 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __IBM_NEWEMAC_TAH_H
#define __IBM_NEWEMAC_TAH_H
/* TAH */
struct tah_regs {
u32 revid;
u32 pad[3];
u32 mr;
u32 ssr0;
u32 ssr1;
u32 ssr2;
u32 ssr3;
u32 ssr4;
u32 ssr5;
u32 tsr;
};
/* TAH device */
struct tah_instance {
struct tah_regs __iomem *base;
/* Only one EMAC whacks us at a time */
struct mutex lock;
/* number of EMACs using this TAH */
int users;
/* OF device instance */
struct platform_device *ofdev;
};
/* TAH engine */
#define TAH_MR_CVR 0x80000000
#define TAH_MR_SR 0x40000000
#define TAH_MR_ST_256 0x01000000
#define TAH_MR_ST_512 0x02000000
#define TAH_MR_ST_768 0x03000000
#define TAH_MR_ST_1024 0x04000000
#define TAH_MR_ST_1280 0x05000000
#define TAH_MR_ST_1536 0x06000000
#define TAH_MR_TFS_16KB 0x00000000
#define TAH_MR_TFS_2KB 0x00200000
#define TAH_MR_TFS_4KB 0x00400000
#define TAH_MR_TFS_6KB 0x00600000
#define TAH_MR_TFS_8KB 0x00800000
#define TAH_MR_TFS_10KB 0x00a00000
#define TAH_MR_DTFP 0x00100000
#define TAH_MR_DIG 0x00080000
#ifdef CONFIG_IBM_EMAC_TAH
extern int tah_init(void);
extern void tah_exit(void);
extern int tah_attach(struct platform_device *ofdev, int channel);
extern void tah_detach(struct platform_device *ofdev, int channel);
extern void tah_reset(struct platform_device *ofdev);
extern int tah_get_regs_len(struct platform_device *ofdev);
extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define tah_init() 0
# define tah_exit() do { } while(0)
# define tah_attach(x,y) (-ENXIO)
# define tah_detach(x,y) do { } while(0)
# define tah_reset(x) do { } while(0)
# define tah_get_regs_len(x) 0
# define tah_dump_regs(x,buf) (buf)
#endif /* !CONFIG_IBM_EMAC_TAH */
#endif /* __IBM_NEWEMAC_TAH_H */

View File

@@ -0,0 +1,332 @@
/*
* drivers/net/ibm_newemac/zmii.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Armin Kuster <akuster@mvista.com>
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <asm/io.h>
#include "emac.h"
#include "core.h"
/* ZMIIx_FER */
#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4))
#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \
ZMII_FER_MDI(2) | ZMII_FER_MDI(3))
#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4))
#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4))
#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4))
/* ZMIIx_SSR */
#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4))
#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4))
#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4))
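/* Editorial illustration: unlike the RGMII bridge, ZMII counts its
 * per-input nibbles down from the MSB, so ZMII_FER_MII(2) =
 * 0x10000000 >> 8 = 0x00100000, and the 100 Mbit speed bit for input 0
 * is ZMII_SSR_SP(0) = 0x10000000.
 */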
/* ZMII only supports MII, RMII and SMII
* we also support autodetection for backward compatibility
*/
static inline int zmii_valid_mode(int mode)
{
return mode == PHY_MODE_MII ||
mode == PHY_MODE_RMII ||
mode == PHY_MODE_SMII ||
mode == PHY_MODE_NA;
}
static inline const char *zmii_mode_name(int mode)
{
switch (mode) {
case PHY_MODE_MII:
return "MII";
case PHY_MODE_RMII:
return "RMII";
case PHY_MODE_SMII:
return "SMII";
default:
BUG();
}
}
static inline u32 zmii_mode_mask(int mode, int input)
{
switch (mode) {
case PHY_MODE_MII:
return ZMII_FER_MII(input);
case PHY_MODE_RMII:
return ZMII_FER_RMII(input);
case PHY_MODE_SMII:
return ZMII_FER_SMII(input);
default:
return 0;
}
}
int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct zmii_regs __iomem *p = dev->base;
ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode);
if (!zmii_valid_mode(*mode)) {
/* Probably an EMAC connected to RGMII,
* but it still may need ZMII for MDIO so
* we don't fail here.
*/
dev->users++;
return 0;
}
mutex_lock(&dev->lock);
/* Autodetect ZMII mode if not specified.
* This is only for backward compatibility with the old driver.
* Please, always specify PHY mode in your board port to avoid
* any surprises.
*/
if (dev->mode == PHY_MODE_NA) {
if (*mode == PHY_MODE_NA) {
u32 r = dev->fer_save;
ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
dev->mode = PHY_MODE_MII;
else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
dev->mode = PHY_MODE_RMII;
else
dev->mode = PHY_MODE_SMII;
} else
dev->mode = *mode;
printk(KERN_NOTICE "%s: bridge in %s mode\n",
ofdev->dev.of_node->full_name,
zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
if (*mode != PHY_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%s: invalid mode %d specified for input %d\n",
ofdev->dev.of_node->full_name, *mode, input);
mutex_unlock(&dev->lock);
return -EINVAL;
}
}
/* Report back correct PHY mode,
* it may be used during PHY initialization.
*/
*mode = dev->mode;
/* Enable this input */
out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
++dev->users;
mutex_unlock(&dev->lock);
return 0;
}
void zmii_get_mdio(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
u32 fer;
ZMII_DBG2(dev, "get_mdio(%d)" NL, input);
mutex_lock(&dev->lock);
fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL;
out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
}
void zmii_put_mdio(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
ZMII_DBG2(dev, "put_mdio(%d)" NL, input);
mutex_unlock(&dev->lock);
}
void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
u32 ssr;
mutex_lock(&dev->lock);
ssr = in_be32(&dev->base->ssr);
ZMII_DBG(dev, "speed(%d, %d)" NL, input, speed);
if (speed == SPEED_100)
ssr |= ZMII_SSR_SP(input);
else
ssr &= ~ZMII_SSR_SP(input);
out_be32(&dev->base->ssr, ssr);
mutex_unlock(&dev->lock);
}
void zmii_detach(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
BUG_ON(!dev || dev->users == 0);
mutex_lock(&dev->lock);
ZMII_DBG(dev, "detach(%d)" NL, input);
/* Disable this input */
out_be32(&dev->base->fer,
in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input));
--dev->users;
mutex_unlock(&dev->lock);
}
int zmii_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct zmii_regs);
}
void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
hdr->version = 0;
hdr->index = 0; /* for now, are there chips with more than one
* zmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
return regs + 1;
}
static int __devinit zmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
struct resource regs;
int rc;
rc = -ENOMEM;
dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
if (dev == NULL) {
printk(KERN_ERR "%s: could not allocate ZMII device!\n",
np->full_name);
goto err_gone;
}
mutex_init(&dev->lock);
dev->ofdev = ofdev;
dev->mode = PHY_MODE_NA;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {
printk(KERN_ERR "%s: Can't get registers address\n",
np->full_name);
goto err_free;
}
rc = -ENOMEM;
dev->base = (struct zmii_regs __iomem *)ioremap(regs.start,
sizeof(struct zmii_regs));
if (dev->base == NULL) {
printk(KERN_ERR "%s: Can't map device registers!\n",
np->full_name);
goto err_free;
}
/* We may need FER value for autodetection later */
dev->fer_save = in_be32(&dev->base->fer);
/* Disable all inputs by default */
out_be32(&dev->base->fer, 0);
printk(KERN_INFO
"ZMII %s initialized\n", ofdev->dev.of_node->full_name);
wmb();
dev_set_drvdata(&ofdev->dev, dev);
return 0;
err_free:
kfree(dev);
err_gone:
return rc;
}
static int __devexit zmii_remove(struct platform_device *ofdev)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
dev_set_drvdata(&ofdev->dev, NULL);
WARN_ON(dev->users != 0);
iounmap(dev->base);
kfree(dev);
return 0;
}
static struct of_device_id zmii_match[] =
{
{
.compatible = "ibm,zmii",
},
/* For backward compat with old DT */
{
.type = "emac-zmii",
},
{},
};
static struct platform_driver zmii_driver = {
.driver = {
.name = "emac-zmii",
.owner = THIS_MODULE,
.of_match_table = zmii_match,
},
.probe = zmii_probe,
.remove = zmii_remove,
};
int __init zmii_init(void)
{
return platform_driver_register(&zmii_driver);
}
void zmii_exit(void)
{
platform_driver_unregister(&zmii_driver);
}

View File

@@ -0,0 +1,78 @@
/*
* drivers/net/ibm_newemac/zmii.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* Copyright (c) 2004, 2005 Zultys Technologies.
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
*
* Based on original work by
* Armin Kuster <akuster@mvista.com>
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __IBM_NEWEMAC_ZMII_H
#define __IBM_NEWEMAC_ZMII_H
/* ZMII bridge registers */
struct zmii_regs {
u32 fer; /* Function enable reg */
u32 ssr; /* Speed select reg */
u32 smiirs; /* SMII status reg */
};
/* ZMII device */
struct zmii_instance {
struct zmii_regs __iomem *base;
/* Only one EMAC whacks us at a time */
struct mutex lock;
/* subset of PHY_MODE_XXXX */
int mode;
/* number of EMACs using this ZMII bridge */
int users;
/* FER value left by firmware */
u32 fer_save;
/* OF device instance */
struct platform_device *ofdev;
};
#ifdef CONFIG_IBM_EMAC_ZMII
extern int zmii_init(void);
extern void zmii_exit(void);
extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
extern void zmii_detach(struct platform_device *ofdev, int input);
extern void zmii_get_mdio(struct platform_device *ofdev, int input);
extern void zmii_put_mdio(struct platform_device *ofdev, int input);
extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
extern int zmii_get_regs_len(struct platform_device *ocpdev);
extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define zmii_init() 0
# define zmii_exit() do { } while(0)
# define zmii_attach(x,y,z) (-ENXIO)
# define zmii_detach(x,y) do { } while(0)
# define zmii_get_mdio(x,y) do { } while(0)
# define zmii_put_mdio(x,y) do { } while(0)
# define zmii_set_speed(x,y,z) do { } while(0)
# define zmii_get_regs_len(x) 0
# define zmii_dump_regs(x,buf) (buf)
#endif /* !CONFIG_IBM_EMAC_ZMII */
#endif /* __IBM_NEWEMAC_ZMII_H */

The diff for this file is not shown because it is too large.

View File

@@ -0,0 +1,195 @@
/*
* IBM Power Virtual Ethernet Device Driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
* Authors: Dave Larson <larson1@us.ibm.com>
* Santiago Leon <santil@linux.vnet.ibm.com>
* Brian King <brking@linux.vnet.ibm.com>
* Robert Jennings <rcj@linux.vnet.ibm.com>
* Anton Blanchard <anton@au.ibm.com>
*/
#ifndef _IBMVETH_H
#define _IBMVETH_H
/* constants for H_MULTICAST_CTRL */
#define IbmVethMcastReceptionModifyBit 0x80000UL
#define IbmVethMcastReceptionEnableBit 0x20000UL
#define IbmVethMcastFilterModifyBit 0x40000UL
#define IbmVethMcastFilterEnableBit 0x10000UL
#define IbmVethMcastEnableRecv (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
#define IbmVethMcastAddFilter 0x1UL
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001UL
/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
#define h_free_logical_lan(ua) \
plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
static inline long h_send_logical_lan(unsigned long unit_address,
unsigned long desc1, unsigned long desc2, unsigned long desc3,
unsigned long desc4, unsigned long desc5, unsigned long desc6,
unsigned long correlator_in, unsigned long *correlator_out)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
desc2, desc3, desc4, desc5, desc6, correlator_in);
*correlator_out = retbuf[0];
return rc;
}
static inline long h_illan_attributes(unsigned long unit_address,
unsigned long reset_mask, unsigned long set_mask,
unsigned long *ret_attributes)
{
long rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
reset_mask, set_mask);
*ret_attributes = retbuf[0];
return rc;
}
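/* Usage sketch (editorial; ibmveth_example_enable_csum is a hypothetical
 * helper, not part of the driver): ibmveth.c follows this pattern when it
 * negotiates checksum offload with the hypervisor.
 */
static inline long ibmveth_example_enable_csum(unsigned long unit_address)
{
unsigned long ret_attr;
/* reset no attribute bits, set IPv4 TCP checksum offload */
return h_illan_attributes(unit_address, 0,
IBMVETH_ILLAN_IPV4_TCP_CSUM, &ret_attr);
}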
#define h_multicast_ctrl(ua, cmd, mac) \
plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)
#define h_change_logical_lan_mac(ua, mac) \
plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_active[] = { 1, 1, 0, 0, 0};
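/* Editorial note: with the defaults above only the first two pools are
 * filled at boot (256 x 512-byte and 512 x 2 KB buffers); the larger
 * 16/32/64 KB pools stay inactive until enabled through the per-pool
 * sysfs attributes backed by the kobject in struct ibmveth_buff_pool.
 */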
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
struct ibmveth_buff_pool {
u32 size;
u32 index;
u32 buff_size;
u32 threshold;
atomic_t available;
u32 consumer_index;
u32 producer_index;
u16 *free_map;
dma_addr_t *dma_addr;
struct sk_buff **skbuff;
int active;
struct kobject kobj;
};
struct ibmveth_rx_q {
u64 index;
u64 num_slots;
u64 toggle;
dma_addr_t queue_dma;
u32 queue_len;
struct ibmveth_rx_q_entry *queue_addr;
};
struct ibmveth_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
struct napi_struct napi;
struct net_device_stats stats;
unsigned int mcastFilterSize;
unsigned long mac_addr;
void *buffer_list_addr;
void *filter_list_addr;
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
/* adapter specific stats */
u64 replenish_task_cycles;
u64 replenish_no_mem;
u64 replenish_add_buff_failure;
u64 replenish_add_buff_success;
u64 rx_invalid_buffer;
u64 rx_no_buffer;
u64 tx_map_failed;
u64 tx_send_failed;
};
struct ibmveth_buf_desc_fields {
u32 flags_len;
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
u32 address;
};
union ibmveth_buf_desc {
u64 desc;
struct ibmveth_buf_desc_fields fields;
};
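/* Example (editorial; ibmveth_example_desc is a hypothetical helper):
 * a descriptor packs the valid bit and the 24-bit length into the high
 * word and the DMA address into the low word, yielding a single u64
 * suitable for handing to the hypervisor.
 */
static inline u64 ibmveth_example_desc(u32 dma_addr, u32 len)
{
union ibmveth_buf_desc desc;
desc.fields.flags_len = IBMVETH_BUF_VALID | (len & IBMVETH_BUF_LEN_MASK);
desc.fields.address = dma_addr;
return desc.desc;
}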
struct ibmveth_rx_q_entry {
u32 flags_off;
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
#define IBMVETH_RXQ_NO_CSUM 0x02000000
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
u32 length;
u64 correlator;
};
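/* Example (editorial; ibmveth_example_entry_is_new is a hypothetical
 * helper): the hypervisor flips IBMVETH_RXQ_TOGGLE each time the queue
 * wraps, so an entry is fresh when its toggle bit matches the toggle
 * state the driver keeps in struct ibmveth_rx_q.
 */
static inline int ibmveth_example_entry_is_new(struct ibmveth_rx_q_entry *e,
u64 toggle)
{
return ((e->flags_off & IBMVETH_RXQ_TOGGLE) >>
IBMVETH_RXQ_TOGGLE_SHIFT) == toggle;
}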
#endif /* _IBMVETH_H */

The diff for this file is not shown because it is too large.