Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (74 commits) [SCSI] sg: fix q->queue_lock on scsi_error_handler path [SCSI] replace __inline with inline [SCSI] a2091: make 2 functions static [SCSI] a3000: make 2 functions static [SCSI] ses: #if 0 the unused ses_match_host() [SCSI] use kmem_cache_zalloc instead of kmem_cache_alloc/memset [SCSI] sg: fix iovec bugs introduced by the block layer conversion [SCSI] qlogicpti: use request_firmware [SCSI] advansys: use request_firmware [SCSI] qla1280: use request_firmware [SCSI] libiscsi: fix iscsi pool error path [SCSI] cxgb3i: call ddp release function directly [SCSI] cxgb3i: merge cxgb3i_ddp into cxgb3i module [SCSI] cxgb3i: close all tcp connections upon chip reset [SCSI] cxgb3i: re-read ddp settings information after chip reset [SCSI] cxgb3i: re-initialize ddp settings after chip reset [SCSI] cxgb3i: subscribe to error notification from cxgb3 driver [SCSI] aacraid driver update [SCSI] mptsas: remove unneeded check [SCSI] config: Make need for SCSI_CDROM clearer ...
This commit is contained in:
@@ -121,10 +121,11 @@ config BLK_DEV_SR
|
||||
tristate "SCSI CDROM support"
|
||||
depends on SCSI
|
||||
---help---
|
||||
If you want to use a SCSI or FireWire CD-ROM under Linux,
|
||||
say Y and read the SCSI-HOWTO and the CDROM-HOWTO at
|
||||
<http://www.tldp.org/docs.html#howto>. Also make sure to say
|
||||
Y or M to "ISO 9660 CD-ROM file system support" later.
|
||||
If you want to use a CD or DVD drive attached to your computer
|
||||
by SCSI, FireWire, USB or ATAPI, say Y and read the SCSI-HOWTO
|
||||
and the CDROM-HOWTO at <http://www.tldp.org/docs.html#howto>.
|
||||
|
||||
Make sure to say Y or M to "ISO 9660 CD-ROM file system support".
|
||||
|
||||
To compile this driver as a module, choose M here and read
|
||||
<file:Documentation/scsi/scsi.txt>.
|
||||
@@ -614,10 +615,16 @@ config LIBFC
|
||||
---help---
|
||||
Fibre Channel library module
|
||||
|
||||
config LIBFCOE
|
||||
tristate "LibFCoE module"
|
||||
select LIBFC
|
||||
---help---
|
||||
Library for Fibre Channel over Ethernet module
|
||||
|
||||
config FCOE
|
||||
tristate "FCoE module"
|
||||
depends on PCI
|
||||
select LIBFC
|
||||
select LIBFCOE
|
||||
---help---
|
||||
Fibre Channel over Ethernet module
|
||||
|
||||
|
@@ -37,6 +37,7 @@ obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
|
||||
obj-$(CONFIG_SCSI_DH) += device_handler/
|
||||
|
||||
obj-$(CONFIG_LIBFC) += libfc/
|
||||
obj-$(CONFIG_LIBFCOE) += fcoe/
|
||||
obj-$(CONFIG_FCOE) += fcoe/
|
||||
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
|
||||
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
|
||||
|
@@ -23,6 +23,8 @@
|
||||
#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
|
||||
#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
|
||||
|
||||
static int a2091_release(struct Scsi_Host *instance);
|
||||
|
||||
static irqreturn_t a2091_intr (int irq, void *_instance)
|
||||
{
|
||||
unsigned long flags;
|
||||
@@ -144,7 +146,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
|
||||
}
|
||||
}
|
||||
|
||||
int __init a2091_detect(struct scsi_host_template *tpnt)
|
||||
static int __init a2091_detect(struct scsi_host_template *tpnt)
|
||||
{
|
||||
static unsigned char called = 0;
|
||||
struct Scsi_Host *instance;
|
||||
@@ -233,7 +235,7 @@ static struct scsi_host_template driver_template = {
|
||||
|
||||
#include "scsi_module.c"
|
||||
|
||||
int a2091_release(struct Scsi_Host *instance)
|
||||
static int a2091_release(struct Scsi_Host *instance)
|
||||
{
|
||||
#ifdef MODULE
|
||||
DMA(instance)->CNTR = 0;
|
||||
|
@@ -11,9 +11,6 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
int a2091_detect(struct scsi_host_template *);
|
||||
int a2091_release(struct Scsi_Host *);
|
||||
|
||||
#ifndef CMD_PER_LUN
|
||||
#define CMD_PER_LUN 2
|
||||
#endif
|
||||
|
@@ -25,6 +25,8 @@
|
||||
|
||||
static struct Scsi_Host *a3000_host = NULL;
|
||||
|
||||
static int a3000_release(struct Scsi_Host *instance);
|
||||
|
||||
static irqreturn_t a3000_intr (int irq, void *dummy)
|
||||
{
|
||||
unsigned long flags;
|
||||
@@ -157,7 +159,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
|
||||
}
|
||||
}
|
||||
|
||||
int __init a3000_detect(struct scsi_host_template *tpnt)
|
||||
static int __init a3000_detect(struct scsi_host_template *tpnt)
|
||||
{
|
||||
wd33c93_regs regs;
|
||||
|
||||
@@ -232,7 +234,7 @@ static struct scsi_host_template driver_template = {
|
||||
|
||||
#include "scsi_module.c"
|
||||
|
||||
int a3000_release(struct Scsi_Host *instance)
|
||||
static int a3000_release(struct Scsi_Host *instance)
|
||||
{
|
||||
wd33c93_release();
|
||||
DMA(instance)->CNTR = 0;
|
||||
|
@@ -11,9 +11,6 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
int a3000_detect(struct scsi_host_template *);
|
||||
int a3000_release(struct Scsi_Host *);
|
||||
|
||||
#ifndef CMD_PER_LUN
|
||||
#define CMD_PER_LUN 2
|
||||
#endif
|
||||
|
@@ -143,7 +143,7 @@ static char *aac_get_status_string(u32 status);
|
||||
*/
|
||||
|
||||
static int nondasd = -1;
|
||||
static int aac_cache;
|
||||
static int aac_cache = 2; /* WCE=0 to avoid performance problems */
|
||||
static int dacmode = -1;
|
||||
int aac_msi;
|
||||
int aac_commit = -1;
|
||||
@@ -157,7 +157,7 @@ module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
|
||||
"\tbit 0 - Disable FUA in WRITE SCSI commands\n"
|
||||
"\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
|
||||
"\tbit 2 - Disable only if Battery not protecting Cache");
|
||||
"\tbit 2 - Disable only if Battery is protecting Cache");
|
||||
module_param(dacmode, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
|
||||
" 0=off, 1=on");
|
||||
@@ -217,6 +217,14 @@ int aac_reset_devices;
|
||||
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
|
||||
|
||||
int aac_wwn = 1;
|
||||
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
|
||||
"\t0 - Disable\n"
|
||||
"\t1 - Array Meta Data Signature (default)\n"
|
||||
"\t2 - Adapter Serial Number");
|
||||
|
||||
|
||||
static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
|
||||
struct fib *fibptr) {
|
||||
struct scsi_device *device;
|
||||
@@ -1206,9 +1214,8 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
|
||||
|
||||
static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
|
||||
{
|
||||
if ((sizeof(dma_addr_t) > 4) &&
|
||||
(num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT)) &&
|
||||
(fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
|
||||
if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
|
||||
(fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
|
||||
return FAILED;
|
||||
return aac_scsi_32(fib, cmd);
|
||||
}
|
||||
@@ -1371,8 +1378,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
if (dev->nondasd_support && !dev->in_reset)
|
||||
printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
|
||||
|
||||
if (dma_get_required_mask(&dev->pdev->dev) > DMA_32BIT_MASK)
|
||||
dev->needs_dac = 1;
|
||||
dev->dac_support = 0;
|
||||
if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
|
||||
if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
|
||||
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
|
||||
if (!dev->in_reset)
|
||||
printk(KERN_INFO "%s%d: 64bit support enabled.\n",
|
||||
dev->name, dev->id);
|
||||
@@ -1382,6 +1392,15 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
if(dacmode != -1) {
|
||||
dev->dac_support = (dacmode!=0);
|
||||
}
|
||||
|
||||
/* avoid problems with AAC_QUIRK_SCSI_32 controllers */
|
||||
if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
|
||||
& AAC_QUIRK_SCSI_32)) {
|
||||
dev->nondasd_support = 0;
|
||||
dev->jbod = 0;
|
||||
expose_physicals = 0;
|
||||
}
|
||||
|
||||
if(dev->dac_support != 0) {
|
||||
if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
|
||||
!pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
|
||||
@@ -2058,7 +2077,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
|
||||
memset(&inq_data, 0, sizeof (struct inquiry_data));
|
||||
|
||||
if (scsicmd->cmnd[1] & 0x1) {
|
||||
if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
|
||||
char *arr = (char *)&inq_data;
|
||||
|
||||
/* EVPD bit set */
|
||||
@@ -2081,7 +2100,12 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
arr[1] = scsicmd->cmnd[2];
|
||||
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
|
||||
sizeof(inq_data));
|
||||
return aac_get_container_serial(scsicmd);
|
||||
if (aac_wwn != 2)
|
||||
return aac_get_container_serial(
|
||||
scsicmd);
|
||||
/* SLES 10 SP1 special */
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
|
||||
} else {
|
||||
/* vpd page not implemented */
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
|
@@ -12,7 +12,7 @@
|
||||
*----------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef AAC_DRIVER_BUILD
|
||||
# define AAC_DRIVER_BUILD 2456
|
||||
# define AAC_DRIVER_BUILD 2461
|
||||
# define AAC_DRIVER_BRANCH "-ms"
|
||||
#endif
|
||||
#define MAXIMUM_NUM_CONTAINERS 32
|
||||
@@ -865,7 +865,11 @@ struct aac_supplement_adapter_info
|
||||
u8 MfgPcbaSerialNo[12];
|
||||
u8 MfgWWNName[8];
|
||||
__le32 SupportedOptions2;
|
||||
__le32 ReservedGrowth[1];
|
||||
__le32 StructExpansion;
|
||||
/* StructExpansion == 1 */
|
||||
__le32 FeatureBits3;
|
||||
__le32 SupportedPerformanceModes;
|
||||
__le32 ReservedForFutureGrowth[80];
|
||||
};
|
||||
#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
|
||||
#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
|
||||
@@ -1020,6 +1024,7 @@ struct aac_dev
|
||||
u8 jbod;
|
||||
u8 cache_protected;
|
||||
u8 dac_support;
|
||||
u8 needs_dac;
|
||||
u8 raid_scsi_mode;
|
||||
u8 comm_interface;
|
||||
# define AAC_COMM_PRODUCER 0
|
||||
|
@@ -54,6 +54,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
|
||||
const unsigned long printfbufsiz = 256;
|
||||
struct aac_init *init;
|
||||
dma_addr_t phys;
|
||||
unsigned long aac_max_hostphysmempages;
|
||||
|
||||
size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
|
||||
|
||||
@@ -90,7 +91,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
|
||||
init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
|
||||
init->AdapterFibsSize = cpu_to_le32(fibsize);
|
||||
init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
|
||||
init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
|
||||
/*
|
||||
* number of 4k pages of host physical memory. The aacraid fw needs
|
||||
* this number to be less than 4gb worth of pages. New firmware doesn't
|
||||
* have any issues with the mapping system, but older Firmware did, and
|
||||
* had *troubles* dealing with the math overloading past 32 bits, thus
|
||||
* we must limit this field.
|
||||
*/
|
||||
aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
|
||||
if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
|
||||
init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
|
||||
else
|
||||
init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
|
||||
|
||||
init->InitFlags = 0;
|
||||
if (dev->comm_interface == AAC_COMM_MESSAGE) {
|
||||
|
@@ -86,7 +86,13 @@ char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
|
||||
*
|
||||
* Note: The last field is used to index into aac_drivers below.
|
||||
*/
|
||||
static struct pci_device_id aac_pci_tbl[] = {
|
||||
#ifdef DECLARE_PCI_DEVICE_TABLE
|
||||
static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
|
||||
#elif defined(__devinitconst)
|
||||
static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
|
||||
#else
|
||||
static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
|
||||
#endif
|
||||
{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
|
||||
{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
|
||||
{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
|
||||
|
File diff soppresso perché troppo grande
Carica Diff
@@ -1034,7 +1034,7 @@ ahd_intr(struct ahd_softc *ahd)
|
||||
}
|
||||
|
||||
/******************************** Private Inlines *****************************/
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_assert_atn(struct ahd_softc *ahd)
|
||||
{
|
||||
ahd_outb(ahd, SCSISIGO, ATNO);
|
||||
@@ -1069,7 +1069,7 @@ ahd_currently_packetized(struct ahd_softc *ahd)
|
||||
return (packetized);
|
||||
}
|
||||
|
||||
static __inline int
|
||||
static inline int
|
||||
ahd_set_active_fifo(struct ahd_softc *ahd)
|
||||
{
|
||||
u_int active_fifo;
|
||||
@@ -1086,7 +1086,7 @@ ahd_set_active_fifo(struct ahd_softc *ahd)
|
||||
}
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
|
||||
{
|
||||
ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
|
||||
@@ -1096,7 +1096,7 @@ ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
|
||||
* Determine whether the sequencer reported a residual
|
||||
* for this SCB/transaction.
|
||||
*/
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
|
||||
{
|
||||
uint32_t sgptr;
|
||||
@@ -1106,7 +1106,7 @@ ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
|
||||
ahd_calc_residual(ahd, scb);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
|
||||
{
|
||||
uint32_t sgptr;
|
||||
@@ -7987,7 +7987,7 @@ ahd_resume(struct ahd_softc *ahd)
|
||||
* scbid that should be restored once manipualtion
|
||||
* of the TCL entry is complete.
|
||||
*/
|
||||
static __inline u_int
|
||||
static inline u_int
|
||||
ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
|
||||
{
|
||||
/*
|
||||
|
@@ -46,21 +46,20 @@
|
||||
#define _AIC79XX_INLINE_H_
|
||||
|
||||
/******************************** Debugging ***********************************/
|
||||
static __inline char *ahd_name(struct ahd_softc *ahd);
|
||||
static inline char *ahd_name(struct ahd_softc *ahd);
|
||||
|
||||
static __inline char *
|
||||
ahd_name(struct ahd_softc *ahd)
|
||||
static inline char *ahd_name(struct ahd_softc *ahd)
|
||||
{
|
||||
return (ahd->name);
|
||||
}
|
||||
|
||||
/************************ Sequencer Execution Control *************************/
|
||||
static __inline void ahd_known_modes(struct ahd_softc *ahd,
|
||||
static inline void ahd_known_modes(struct ahd_softc *ahd,
|
||||
ahd_mode src, ahd_mode dst);
|
||||
static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
|
||||
static inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
|
||||
ahd_mode src,
|
||||
ahd_mode dst);
|
||||
static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
|
||||
static inline void ahd_extract_mode_state(struct ahd_softc *ahd,
|
||||
ahd_mode_state state,
|
||||
ahd_mode *src, ahd_mode *dst);
|
||||
|
||||
@@ -73,7 +72,7 @@ int ahd_is_paused(struct ahd_softc *ahd);
|
||||
void ahd_pause(struct ahd_softc *ahd);
|
||||
void ahd_unpause(struct ahd_softc *ahd);
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
|
||||
{
|
||||
ahd->src_mode = src;
|
||||
@@ -82,13 +81,13 @@ ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
|
||||
ahd->saved_dst_mode = dst;
|
||||
}
|
||||
|
||||
static __inline ahd_mode_state
|
||||
static inline ahd_mode_state
|
||||
ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
|
||||
{
|
||||
return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
|
||||
ahd_mode *src, ahd_mode *dst)
|
||||
{
|
||||
@@ -102,13 +101,12 @@ void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
|
||||
bus_size_t len, int last);
|
||||
|
||||
/************************** Memory mapping routines ***************************/
|
||||
static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
|
||||
static inline size_t ahd_sg_size(struct ahd_softc *ahd);
|
||||
|
||||
void ahd_sync_sglist(struct ahd_softc *ahd,
|
||||
struct scb *scb, int op);
|
||||
|
||||
static __inline size_t
|
||||
ahd_sg_size(struct ahd_softc *ahd)
|
||||
static inline size_t ahd_sg_size(struct ahd_softc *ahd)
|
||||
{
|
||||
if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
|
||||
return (sizeof(struct ahd_dma64_seg));
|
||||
@@ -141,11 +139,9 @@ struct scb *
|
||||
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
|
||||
void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
|
||||
|
||||
static __inline uint8_t *
|
||||
ahd_get_sense_buf(struct ahd_softc *ahd,
|
||||
static inline uint8_t *ahd_get_sense_buf(struct ahd_softc *ahd,
|
||||
struct scb *scb);
|
||||
static __inline uint32_t
|
||||
ahd_get_sense_bufaddr(struct ahd_softc *ahd,
|
||||
static inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd,
|
||||
struct scb *scb);
|
||||
|
||||
#if 0 /* unused */
|
||||
@@ -158,13 +154,13 @@ do { \
|
||||
|
||||
#endif
|
||||
|
||||
static __inline uint8_t *
|
||||
static inline uint8_t *
|
||||
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
|
||||
{
|
||||
return (scb->sense_data);
|
||||
}
|
||||
|
||||
static __inline uint32_t
|
||||
static inline uint32_t
|
||||
ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
|
||||
{
|
||||
return (scb->sense_busaddr);
|
||||
|
@@ -395,19 +395,19 @@ struct info_str {
|
||||
};
|
||||
|
||||
/******************************** Locking *************************************/
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_lockinit(struct ahd_softc *ahd)
|
||||
{
|
||||
spin_lock_init(&ahd->platform_data->spin_lock);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
|
||||
{
|
||||
spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
|
||||
{
|
||||
spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
|
||||
@@ -490,29 +490,29 @@ void ahd_pci_write_config(ahd_dev_softc_t pci,
|
||||
int reg, uint32_t value,
|
||||
int width);
|
||||
|
||||
static __inline int ahd_get_pci_function(ahd_dev_softc_t);
|
||||
static __inline int
|
||||
static inline int ahd_get_pci_function(ahd_dev_softc_t);
|
||||
static inline int
|
||||
ahd_get_pci_function(ahd_dev_softc_t pci)
|
||||
{
|
||||
return (PCI_FUNC(pci->devfn));
|
||||
}
|
||||
|
||||
static __inline int ahd_get_pci_slot(ahd_dev_softc_t);
|
||||
static __inline int
|
||||
static inline int ahd_get_pci_slot(ahd_dev_softc_t);
|
||||
static inline int
|
||||
ahd_get_pci_slot(ahd_dev_softc_t pci)
|
||||
{
|
||||
return (PCI_SLOT(pci->devfn));
|
||||
}
|
||||
|
||||
static __inline int ahd_get_pci_bus(ahd_dev_softc_t);
|
||||
static __inline int
|
||||
static inline int ahd_get_pci_bus(ahd_dev_softc_t);
|
||||
static inline int
|
||||
ahd_get_pci_bus(ahd_dev_softc_t pci)
|
||||
{
|
||||
return (pci->bus->number);
|
||||
}
|
||||
|
||||
static __inline void ahd_flush_device_writes(struct ahd_softc *);
|
||||
static __inline void
|
||||
static inline void ahd_flush_device_writes(struct ahd_softc *);
|
||||
static inline void
|
||||
ahd_flush_device_writes(struct ahd_softc *ahd)
|
||||
{
|
||||
/* XXX Is this sufficient for all architectures??? */
|
||||
@@ -524,81 +524,81 @@ int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
|
||||
off_t, int, int);
|
||||
|
||||
/*********************** Transaction Access Wrappers **************************/
|
||||
static __inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
|
||||
static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
|
||||
static __inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
|
||||
static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
|
||||
static __inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
|
||||
static __inline uint32_t ahd_get_transaction_status(struct scb *);
|
||||
static __inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
|
||||
static __inline uint32_t ahd_get_scsi_status(struct scb *);
|
||||
static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
|
||||
static __inline u_long ahd_get_transfer_length(struct scb *);
|
||||
static __inline int ahd_get_transfer_dir(struct scb *);
|
||||
static __inline void ahd_set_residual(struct scb *, u_long);
|
||||
static __inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
|
||||
static __inline u_long ahd_get_residual(struct scb *);
|
||||
static __inline u_long ahd_get_sense_residual(struct scb *);
|
||||
static __inline int ahd_perform_autosense(struct scb *);
|
||||
static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
|
||||
static inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
|
||||
static inline void ahd_set_transaction_status(struct scb *, uint32_t);
|
||||
static inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
|
||||
static inline void ahd_set_scsi_status(struct scb *, uint32_t);
|
||||
static inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
|
||||
static inline uint32_t ahd_get_transaction_status(struct scb *);
|
||||
static inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
|
||||
static inline uint32_t ahd_get_scsi_status(struct scb *);
|
||||
static inline void ahd_set_transaction_tag(struct scb *, int, u_int);
|
||||
static inline u_long ahd_get_transfer_length(struct scb *);
|
||||
static inline int ahd_get_transfer_dir(struct scb *);
|
||||
static inline void ahd_set_residual(struct scb *, u_long);
|
||||
static inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
|
||||
static inline u_long ahd_get_residual(struct scb *);
|
||||
static inline u_long ahd_get_sense_residual(struct scb *);
|
||||
static inline int ahd_perform_autosense(struct scb *);
|
||||
static inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
|
||||
struct scb *);
|
||||
static __inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
|
||||
static inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
|
||||
struct ahd_devinfo *);
|
||||
static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
|
||||
static inline void ahd_platform_scb_free(struct ahd_softc *ahd,
|
||||
struct scb *scb);
|
||||
static __inline void ahd_freeze_scb(struct scb *scb);
|
||||
static inline void ahd_freeze_scb(struct scb *scb);
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
|
||||
{
|
||||
cmd->result &= ~(CAM_STATUS_MASK << 16);
|
||||
cmd->result |= status << 16;
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_set_transaction_status(struct scb *scb, uint32_t status)
|
||||
{
|
||||
ahd_cmd_set_transaction_status(scb->io_ctx,status);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
|
||||
{
|
||||
cmd->result &= ~0xFFFF;
|
||||
cmd->result |= status;
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_set_scsi_status(struct scb *scb, uint32_t status)
|
||||
{
|
||||
ahd_cmd_set_scsi_status(scb->io_ctx, status);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd)
|
||||
{
|
||||
return ((cmd->result >> 16) & CAM_STATUS_MASK);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahd_get_transaction_status(struct scb *scb)
|
||||
{
|
||||
return (ahd_cmd_get_transaction_status(scb->io_ctx));
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd)
|
||||
{
|
||||
return (cmd->result & 0xFFFF);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahd_get_scsi_status(struct scb *scb)
|
||||
{
|
||||
return (ahd_cmd_get_scsi_status(scb->io_ctx));
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
|
||||
{
|
||||
/*
|
||||
@@ -607,43 +607,43 @@ void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
|
||||
*/
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
u_long ahd_get_transfer_length(struct scb *scb)
|
||||
{
|
||||
return (scb->platform_data->xfer_len);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
int ahd_get_transfer_dir(struct scb *scb)
|
||||
{
|
||||
return (scb->io_ctx->sc_data_direction);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_set_residual(struct scb *scb, u_long resid)
|
||||
{
|
||||
scsi_set_resid(scb->io_ctx, resid);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahd_set_sense_residual(struct scb *scb, u_long resid)
|
||||
{
|
||||
scb->platform_data->sense_resid = resid;
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
u_long ahd_get_residual(struct scb *scb)
|
||||
{
|
||||
return scsi_get_resid(scb->io_ctx);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
u_long ahd_get_sense_residual(struct scb *scb)
|
||||
{
|
||||
return (scb->platform_data->sense_resid);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
int ahd_perform_autosense(struct scb *scb)
|
||||
{
|
||||
/*
|
||||
@@ -654,20 +654,20 @@ int ahd_perform_autosense(struct scb *scb)
|
||||
return (1);
|
||||
}
|
||||
|
||||
static __inline uint32_t
|
||||
static inline uint32_t
|
||||
ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb)
|
||||
{
|
||||
return (sizeof(struct scsi_sense_data));
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_notify_xfer_settings_change(struct ahd_softc *ahd,
|
||||
struct ahd_devinfo *devinfo)
|
||||
{
|
||||
/* Nothing to do here for linux */
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb)
|
||||
{
|
||||
ahd->flags &= ~AHD_RESOURCE_SHORTAGE;
|
||||
@@ -678,7 +678,7 @@ void ahd_platform_free(struct ahd_softc *ahd);
|
||||
void ahd_platform_init(struct ahd_softc *ahd);
|
||||
void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahd_freeze_scb(struct scb *scb)
|
||||
{
|
||||
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
|
||||
|
@@ -51,7 +51,7 @@
|
||||
|
||||
#include "aic79xx_pci.h"
|
||||
|
||||
static __inline uint64_t
|
||||
static inline uint64_t
|
||||
ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
|
||||
{
|
||||
uint64_t id;
|
||||
@@ -377,14 +377,12 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
|
||||
error = ahd_init(ahd);
|
||||
if (error != 0)
|
||||
return (error);
|
||||
ahd->init_level++;
|
||||
|
||||
/*
|
||||
* Allow interrupts now that we are completely setup.
|
||||
*/
|
||||
error = ahd_pci_map_int(ahd);
|
||||
if (!error)
|
||||
ahd->init_level++;
|
||||
return error;
|
||||
return ahd_pci_map_int(ahd);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
@@ -55,10 +55,9 @@ void ahc_sync_sglist(struct ahc_softc *ahc,
|
||||
struct scb *scb, int op);
|
||||
|
||||
/******************************** Debugging ***********************************/
|
||||
static __inline char *ahc_name(struct ahc_softc *ahc);
|
||||
static inline char *ahc_name(struct ahc_softc *ahc);
|
||||
|
||||
static __inline char *
|
||||
ahc_name(struct ahc_softc *ahc)
|
||||
static inline char *ahc_name(struct ahc_softc *ahc)
|
||||
{
|
||||
return (ahc->name);
|
||||
}
|
||||
|
@@ -230,7 +230,7 @@ int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
|
||||
#include "aic7xxx.h"
|
||||
|
||||
/***************************** Timer Facilities *******************************/
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_scb_timer_reset(struct scb *scb, u_int usec)
|
||||
{
|
||||
}
|
||||
@@ -401,19 +401,19 @@ struct info_str {
|
||||
/******************************** Locking *************************************/
|
||||
/* Lock protecting internal data structures */
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_lockinit(struct ahc_softc *ahc)
|
||||
{
|
||||
spin_lock_init(&ahc->platform_data->spin_lock);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
|
||||
{
|
||||
spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
|
||||
{
|
||||
spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
|
||||
@@ -493,22 +493,22 @@ void ahc_pci_write_config(ahc_dev_softc_t pci,
|
||||
int reg, uint32_t value,
|
||||
int width);
|
||||
|
||||
static __inline int ahc_get_pci_function(ahc_dev_softc_t);
|
||||
static __inline int
|
||||
static inline int ahc_get_pci_function(ahc_dev_softc_t);
|
||||
static inline int
|
||||
ahc_get_pci_function(ahc_dev_softc_t pci)
|
||||
{
|
||||
return (PCI_FUNC(pci->devfn));
|
||||
}
|
||||
|
||||
static __inline int ahc_get_pci_slot(ahc_dev_softc_t);
|
||||
static __inline int
|
||||
static inline int ahc_get_pci_slot(ahc_dev_softc_t);
|
||||
static inline int
|
||||
ahc_get_pci_slot(ahc_dev_softc_t pci)
|
||||
{
|
||||
return (PCI_SLOT(pci->devfn));
|
||||
}
|
||||
|
||||
static __inline int ahc_get_pci_bus(ahc_dev_softc_t);
|
||||
static __inline int
|
||||
static inline int ahc_get_pci_bus(ahc_dev_softc_t);
|
||||
static inline int
|
||||
ahc_get_pci_bus(ahc_dev_softc_t pci)
|
||||
{
|
||||
return (pci->bus->number);
|
||||
@@ -521,8 +521,8 @@ static inline void ahc_linux_pci_exit(void) {
|
||||
}
|
||||
#endif
|
||||
|
||||
static __inline void ahc_flush_device_writes(struct ahc_softc *);
|
||||
static __inline void
|
||||
static inline void ahc_flush_device_writes(struct ahc_softc *);
|
||||
static inline void
|
||||
ahc_flush_device_writes(struct ahc_softc *ahc)
|
||||
{
|
||||
/* XXX Is this sufficient for all architectures??? */
|
||||
@@ -535,81 +535,81 @@ int ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
|
||||
|
||||
/*************************** Domain Validation ********************************/
|
||||
/*********************** Transaction Access Wrappers *************************/
|
||||
static __inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
|
||||
static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
|
||||
static __inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
|
||||
static __inline void ahc_set_scsi_status(struct scb *, uint32_t);
|
||||
static __inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
|
||||
static __inline uint32_t ahc_get_transaction_status(struct scb *);
|
||||
static __inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
|
||||
static __inline uint32_t ahc_get_scsi_status(struct scb *);
|
||||
static __inline void ahc_set_transaction_tag(struct scb *, int, u_int);
|
||||
static __inline u_long ahc_get_transfer_length(struct scb *);
|
||||
static __inline int ahc_get_transfer_dir(struct scb *);
|
||||
static __inline void ahc_set_residual(struct scb *, u_long);
|
||||
static __inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
|
||||
static __inline u_long ahc_get_residual(struct scb *);
|
||||
static __inline u_long ahc_get_sense_residual(struct scb *);
|
||||
static __inline int ahc_perform_autosense(struct scb *);
|
||||
static __inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
|
||||
static inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
|
||||
static inline void ahc_set_transaction_status(struct scb *, uint32_t);
|
||||
static inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
|
||||
static inline void ahc_set_scsi_status(struct scb *, uint32_t);
|
||||
static inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
|
||||
static inline uint32_t ahc_get_transaction_status(struct scb *);
|
||||
static inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
|
||||
static inline uint32_t ahc_get_scsi_status(struct scb *);
|
||||
static inline void ahc_set_transaction_tag(struct scb *, int, u_int);
|
||||
static inline u_long ahc_get_transfer_length(struct scb *);
|
||||
static inline int ahc_get_transfer_dir(struct scb *);
|
||||
static inline void ahc_set_residual(struct scb *, u_long);
|
||||
static inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
|
||||
static inline u_long ahc_get_residual(struct scb *);
|
||||
static inline u_long ahc_get_sense_residual(struct scb *);
|
||||
static inline int ahc_perform_autosense(struct scb *);
|
||||
static inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
|
||||
struct scb *);
|
||||
static __inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
|
||||
static inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
|
||||
struct ahc_devinfo *);
|
||||
static __inline void ahc_platform_scb_free(struct ahc_softc *ahc,
|
||||
static inline void ahc_platform_scb_free(struct ahc_softc *ahc,
|
||||
struct scb *scb);
|
||||
static __inline void ahc_freeze_scb(struct scb *scb);
|
||||
static inline void ahc_freeze_scb(struct scb *scb);
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
|
||||
{
|
||||
cmd->result &= ~(CAM_STATUS_MASK << 16);
|
||||
cmd->result |= status << 16;
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_set_transaction_status(struct scb *scb, uint32_t status)
|
||||
{
|
||||
ahc_cmd_set_transaction_status(scb->io_ctx,status);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
|
||||
{
|
||||
cmd->result &= ~0xFFFF;
|
||||
cmd->result |= status;
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_set_scsi_status(struct scb *scb, uint32_t status)
|
||||
{
|
||||
ahc_cmd_set_scsi_status(scb->io_ctx, status);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd)
|
||||
{
|
||||
return ((cmd->result >> 16) & CAM_STATUS_MASK);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahc_get_transaction_status(struct scb *scb)
|
||||
{
|
||||
return (ahc_cmd_get_transaction_status(scb->io_ctx));
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd)
|
||||
{
|
||||
return (cmd->result & 0xFFFF);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
uint32_t ahc_get_scsi_status(struct scb *scb)
|
||||
{
|
||||
return (ahc_cmd_get_scsi_status(scb->io_ctx));
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
|
||||
{
|
||||
/*
|
||||
@@ -618,43 +618,43 @@ void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
|
||||
*/
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
u_long ahc_get_transfer_length(struct scb *scb)
|
||||
{
|
||||
return (scb->platform_data->xfer_len);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
int ahc_get_transfer_dir(struct scb *scb)
|
||||
{
|
||||
return (scb->io_ctx->sc_data_direction);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_set_residual(struct scb *scb, u_long resid)
|
||||
{
|
||||
scsi_set_resid(scb->io_ctx, resid);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
void ahc_set_sense_residual(struct scb *scb, u_long resid)
|
||||
{
|
||||
scb->platform_data->sense_resid = resid;
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
u_long ahc_get_residual(struct scb *scb)
|
||||
{
|
||||
return scsi_get_resid(scb->io_ctx);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
u_long ahc_get_sense_residual(struct scb *scb)
|
||||
{
|
||||
return (scb->platform_data->sense_resid);
|
||||
}
|
||||
|
||||
static __inline
|
||||
static inline
|
||||
int ahc_perform_autosense(struct scb *scb)
|
||||
{
|
||||
/*
|
||||
@@ -665,20 +665,20 @@ int ahc_perform_autosense(struct scb *scb)
|
||||
return (1);
|
||||
}
|
||||
|
||||
static __inline uint32_t
|
||||
static inline uint32_t
|
||||
ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb)
|
||||
{
|
||||
return (sizeof(struct scsi_sense_data));
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_notify_xfer_settings_change(struct ahc_softc *ahc,
|
||||
struct ahc_devinfo *devinfo)
|
||||
{
|
||||
/* Nothing to do here for linux */
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb)
|
||||
{
|
||||
}
|
||||
@@ -687,7 +687,7 @@ int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg);
|
||||
void ahc_platform_free(struct ahc_softc *ahc);
|
||||
void ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
|
||||
|
||||
static __inline void
|
||||
static inline void
|
||||
ahc_freeze_scb(struct scb *scb)
|
||||
{
|
||||
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
|
||||
|
@@ -54,7 +54,7 @@
|
||||
|
||||
#include "aic7xxx_pci.h"
|
||||
|
||||
static __inline uint64_t
|
||||
static inline uint64_t
|
||||
ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
|
||||
{
|
||||
uint64_t id;
|
||||
@@ -960,16 +960,12 @@ ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
|
||||
error = ahc_init(ahc);
|
||||
if (error != 0)
|
||||
return (error);
|
||||
ahc->init_level++;
|
||||
|
||||
/*
|
||||
* Allow interrupts now that we are completely setup.
|
||||
*/
|
||||
error = ahc_pci_map_int(ahc);
|
||||
if (error != 0)
|
||||
return (error);
|
||||
|
||||
ahc->init_level++;
|
||||
return (0);
|
||||
return ahc_pci_map_int(ahc);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -133,7 +133,7 @@ struct scsi_sense_data
|
||||
#define SCSI_STATUS_TASK_ABORTED 0x40
|
||||
|
||||
/************************* Large Disk Handling ********************************/
|
||||
static __inline int
|
||||
static inline int
|
||||
aic_sector_div(sector_t capacity, int heads, int sectors)
|
||||
{
|
||||
/* ugly, ugly sector_div calling convention.. */
|
||||
@@ -141,7 +141,7 @@ aic_sector_div(sector_t capacity, int heads, int sectors)
|
||||
return (int)capacity;
|
||||
}
|
||||
|
||||
static __inline uint32_t
|
||||
static inline uint32_t
|
||||
scsi_4btoul(uint8_t *bytes)
|
||||
{
|
||||
uint32_t rv;
|
||||
|
@@ -1,4 +1,4 @@
|
||||
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
|
||||
|
||||
cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o
|
||||
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i_ddp.o cxgb3i.o
|
||||
cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o
|
||||
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
|
||||
|
@@ -66,10 +66,12 @@ struct cxgb3i_hba {
|
||||
* @pdev: pointer to pci dev
|
||||
* @hba_cnt: # of hbas (the same as # of ports)
|
||||
* @hba: all the hbas on this adapter
|
||||
* @flags: bit flag for adapter event/status
|
||||
* @tx_max_size: max. tx packet size supported
|
||||
* @rx_max_size: max. rx packet size supported
|
||||
* @tag_format: ddp tag format settings
|
||||
*/
|
||||
#define CXGB3I_ADAPTER_FLAG_RESET 0x1
|
||||
struct cxgb3i_adapter {
|
||||
struct list_head list_head;
|
||||
spinlock_t lock;
|
||||
@@ -78,6 +80,7 @@ struct cxgb3i_adapter {
|
||||
unsigned char hba_cnt;
|
||||
struct cxgb3i_hba *hba[MAX_NPORTS];
|
||||
|
||||
unsigned int flags;
|
||||
unsigned int tx_max_size;
|
||||
unsigned int rx_max_size;
|
||||
|
||||
@@ -137,10 +140,9 @@ struct cxgb3i_task_data {
|
||||
int cxgb3i_iscsi_init(void);
|
||||
void cxgb3i_iscsi_cleanup(void);
|
||||
|
||||
struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *);
|
||||
void cxgb3i_adapter_remove(struct t3cdev *);
|
||||
int cxgb3i_adapter_ulp_init(struct cxgb3i_adapter *);
|
||||
void cxgb3i_adapter_ulp_cleanup(struct cxgb3i_adapter *);
|
||||
struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
|
||||
void cxgb3i_adapter_open(struct t3cdev *);
|
||||
void cxgb3i_adapter_close(struct t3cdev *);
|
||||
|
||||
struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
|
||||
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
|
||||
|
@@ -23,19 +23,6 @@
|
||||
|
||||
#include "cxgb3i_ddp.h"
|
||||
|
||||
#define DRV_MODULE_NAME "cxgb3i_ddp"
|
||||
#define DRV_MODULE_VERSION "1.0.0"
|
||||
#define DRV_MODULE_RELDATE "Dec. 1, 2008"
|
||||
|
||||
static char version[] =
|
||||
"Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
|
||||
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
||||
|
||||
MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
|
||||
MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
||||
#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
|
||||
#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
|
||||
#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)
|
||||
@@ -66,9 +53,6 @@ static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
|
||||
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
|
||||
static unsigned char page_idx = DDP_PGIDX_MAX;
|
||||
|
||||
static LIST_HEAD(cxgb3i_ddp_list);
|
||||
static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);
|
||||
|
||||
/*
|
||||
* functions to program the pagepod in h/w
|
||||
*/
|
||||
@@ -113,8 +97,8 @@ static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
|
||||
unsigned int npods)
|
||||
static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
|
||||
unsigned int idx, unsigned int npods)
|
||||
{
|
||||
unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
|
||||
int i;
|
||||
@@ -122,13 +106,17 @@ static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
|
||||
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
|
||||
struct sk_buff *skb = ddp->gl_skb[idx];
|
||||
|
||||
if (!skb) {
|
||||
ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
|
||||
tag, idx, i, npods);
|
||||
continue;
|
||||
}
|
||||
ddp->gl_skb[idx] = NULL;
|
||||
memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
|
||||
ulp_mem_io_set_hdr(skb, pm_addr);
|
||||
skb->priority = CPL_PRIORITY_CONTROL;
|
||||
cxgb3_ofld_send(ddp->tdev, skb);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
|
||||
@@ -211,7 +199,6 @@ int cxgb3i_ddp_find_page_index(unsigned long pgsz)
|
||||
ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
|
||||
return DDP_PGIDX_MAX;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
|
||||
|
||||
static inline void ddp_gl_unmap(struct pci_dev *pdev,
|
||||
struct cxgb3i_gather_list *gl)
|
||||
@@ -334,7 +321,6 @@ error_out:
|
||||
kfree(gl);
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
|
||||
|
||||
/**
|
||||
* cxgb3i_ddp_release_gl - release a page buffer list
|
||||
@@ -348,7 +334,6 @@ void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
|
||||
ddp_gl_unmap(pdev, gl);
|
||||
kfree(gl);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
|
||||
|
||||
/**
|
||||
* cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
|
||||
@@ -430,7 +415,6 @@ unmark_entries:
|
||||
ddp_unmark_entries(ddp, idx, npods);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);
|
||||
|
||||
/**
|
||||
* cxgb3i_ddp_tag_release - release a ddp tag
|
||||
@@ -453,22 +437,21 @@ void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
|
||||
struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
|
||||
unsigned int npods;
|
||||
|
||||
if (!gl) {
|
||||
ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
|
||||
tag, idx);
|
||||
if (!gl || !gl->nelem) {
|
||||
ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
|
||||
tag, idx, gl, gl ? gl->nelem : 0);
|
||||
return;
|
||||
}
|
||||
npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
|
||||
ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
|
||||
tag, idx, npods);
|
||||
clear_ddp_map(ddp, idx, npods);
|
||||
clear_ddp_map(ddp, tag, idx, npods);
|
||||
ddp_unmark_entries(ddp, idx, npods);
|
||||
cxgb3i_ddp_release_gl(gl, ddp->pdev);
|
||||
} else
|
||||
ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
|
||||
tag, idx, ddp->nppods);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);
|
||||
|
||||
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
|
||||
int reply)
|
||||
@@ -509,7 +492,6 @@ int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
|
||||
{
|
||||
return setup_conn_pgidx(tdev, tid, page_idx, reply);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);
|
||||
|
||||
/**
|
||||
* cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
|
||||
@@ -526,7 +508,6 @@ int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
|
||||
|
||||
return setup_conn_pgidx(tdev, tid, pgidx, reply);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);
|
||||
|
||||
/**
|
||||
* cxgb3i_setup_conn_digest - setup conn. digest setting
|
||||
@@ -562,26 +543,104 @@ int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
|
||||
cxgb3_ofld_send(tdev, skb);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
|
||||
|
||||
static int ddp_init(struct t3cdev *tdev)
|
||||
|
||||
/**
|
||||
* cxgb3i_adapter_ddp_info - read the adapter's ddp information
|
||||
* @tdev: t3cdev adapter
|
||||
* @tformat: tag format
|
||||
* @txsz: max tx pdu payload size, filled in by this func.
|
||||
* @rxsz: max rx pdu payload size, filled in by this func.
|
||||
* setup the tag format for a given iscsi entity
|
||||
*/
|
||||
int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
|
||||
struct cxgb3i_tag_format *tformat,
|
||||
unsigned int *txsz, unsigned int *rxsz)
|
||||
{
|
||||
struct cxgb3i_ddp_info *ddp;
|
||||
unsigned char idx_bits;
|
||||
|
||||
if (!tformat)
|
||||
return -EINVAL;
|
||||
|
||||
if (!tdev->ulp_iscsi)
|
||||
return -EINVAL;
|
||||
|
||||
ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
|
||||
|
||||
idx_bits = 32 - tformat->sw_bits;
|
||||
tformat->rsvd_bits = ddp->idx_bits;
|
||||
tformat->rsvd_shift = PPOD_IDX_SHIFT;
|
||||
tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
|
||||
|
||||
ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
|
||||
tformat->sw_bits, tformat->rsvd_bits,
|
||||
tformat->rsvd_shift, tformat->rsvd_mask);
|
||||
|
||||
*txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
|
||||
ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
|
||||
*rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
|
||||
ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
|
||||
ddp_log_info("max payload size: %u/%u, %u/%u.\n",
|
||||
*txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
|
||||
* @tdev: t3cdev adapter
|
||||
* release all the resource held by the ddp pagepod manager for a given
|
||||
* adapter if needed
|
||||
*/
|
||||
void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
|
||||
{
|
||||
int i = 0;
|
||||
struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
|
||||
|
||||
ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
|
||||
|
||||
if (ddp) {
|
||||
tdev->ulp_iscsi = NULL;
|
||||
while (i < ddp->nppods) {
|
||||
struct cxgb3i_gather_list *gl = ddp->gl_map[i];
|
||||
if (gl) {
|
||||
int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
|
||||
>> PPOD_PAGES_SHIFT;
|
||||
ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
|
||||
tdev, i, npods);
|
||||
kfree(gl);
|
||||
ddp_free_gl_skb(ddp, i, npods);
|
||||
i += npods;
|
||||
} else
|
||||
i++;
|
||||
}
|
||||
cxgb3i_free_big_mem(ddp);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ddp_init - initialize the cxgb3 adapter's ddp resource
|
||||
* @tdev: t3cdev adapter
|
||||
* initialize the ddp pagepod manager for a given adapter
|
||||
*/
|
||||
static void ddp_init(struct t3cdev *tdev)
|
||||
{
|
||||
struct cxgb3i_ddp_info *ddp;
|
||||
struct ulp_iscsi_info uinfo;
|
||||
unsigned int ppmax, bits;
|
||||
int i, err;
|
||||
static int vers_printed;
|
||||
|
||||
if (!vers_printed) {
|
||||
printk(KERN_INFO "%s", version);
|
||||
vers_printed = 1;
|
||||
if (tdev->ulp_iscsi) {
|
||||
ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
|
||||
tdev, tdev->ulp_iscsi);
|
||||
return;
|
||||
}
|
||||
|
||||
err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
|
||||
if (err < 0) {
|
||||
ddp_log_error("%s, failed to get iscsi param err=%d.\n",
|
||||
tdev->name, err);
|
||||
return err;
|
||||
return;
|
||||
}
|
||||
|
||||
ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
|
||||
@@ -598,7 +657,7 @@ static int ddp_init(struct t3cdev *tdev)
|
||||
if (!ddp) {
|
||||
ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
|
||||
tdev->name, ppmax);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
|
||||
ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
|
||||
@@ -632,142 +691,26 @@ static int ddp_init(struct t3cdev *tdev)
|
||||
|
||||
tdev->ulp_iscsi = ddp;
|
||||
|
||||
/* add to the list */
|
||||
write_lock(&cxgb3i_ddp_rwlock);
|
||||
list_add_tail(&ddp->list, &cxgb3i_ddp_list);
|
||||
write_unlock(&cxgb3i_ddp_rwlock);
|
||||
|
||||
ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
|
||||
"pkt %u/%u, %u/%u.\n",
|
||||
ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
|
||||
ddp->idx_mask, ddp->rsvd_tag_mask,
|
||||
ddp->max_txsz, uinfo.max_txsz,
|
||||
ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
|
||||
" %u/%u.\n",
|
||||
tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
|
||||
ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
|
||||
ddp->max_rxsz, uinfo.max_rxsz);
|
||||
return 0;
|
||||
return;
|
||||
|
||||
free_ddp_map:
|
||||
cxgb3i_free_big_mem(ddp);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
|
||||
* @tdev: t3cdev adapter
|
||||
* @tformat: tag format
|
||||
* @txsz: max tx pdu payload size, filled in by this func.
|
||||
* @rxsz: max rx pdu payload size, filled in by this func.
|
||||
* initialize the ddp pagepod manager for a given adapter if needed and
|
||||
* setup the tag format for a given iscsi entity
|
||||
* cxgb3i_ddp_init - initialize ddp functions
|
||||
*/
|
||||
int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
|
||||
struct cxgb3i_tag_format *tformat,
|
||||
unsigned int *txsz, unsigned int *rxsz)
|
||||
void cxgb3i_ddp_init(struct t3cdev *tdev)
|
||||
{
|
||||
struct cxgb3i_ddp_info *ddp;
|
||||
unsigned char idx_bits;
|
||||
|
||||
if (!tformat)
|
||||
return -EINVAL;
|
||||
|
||||
if (!tdev->ulp_iscsi) {
|
||||
int err = ddp_init(tdev);
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (page_idx == DDP_PGIDX_MAX) {
|
||||
page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
|
||||
ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
|
||||
PAGE_SIZE, page_idx);
|
||||
}
|
||||
ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
|
||||
|
||||
idx_bits = 32 - tformat->sw_bits;
|
||||
tformat->rsvd_bits = ddp->idx_bits;
|
||||
tformat->rsvd_shift = PPOD_IDX_SHIFT;
|
||||
tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
|
||||
|
||||
ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
|
||||
tformat->sw_bits, tformat->rsvd_bits,
|
||||
tformat->rsvd_shift, tformat->rsvd_mask);
|
||||
|
||||
*txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
|
||||
ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
|
||||
*rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
|
||||
ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
|
||||
ddp_log_info("max payload size: %u/%u, %u/%u.\n",
|
||||
*txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
|
||||
return 0;
|
||||
ddp_init(tdev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
|
||||
|
||||
static void ddp_release(struct cxgb3i_ddp_info *ddp)
|
||||
{
|
||||
int i = 0;
|
||||
struct t3cdev *tdev = ddp->tdev;
|
||||
|
||||
tdev->ulp_iscsi = NULL;
|
||||
while (i < ddp->nppods) {
|
||||
struct cxgb3i_gather_list *gl = ddp->gl_map[i];
|
||||
if (gl) {
|
||||
int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
|
||||
>> PPOD_PAGES_SHIFT;
|
||||
|
||||
kfree(gl);
|
||||
ddp_free_gl_skb(ddp, i, npods);
|
||||
} else
|
||||
i++;
|
||||
}
|
||||
cxgb3i_free_big_mem(ddp);
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
|
||||
* @tdev: t3cdev adapter
|
||||
* release all the resource held by the ddp pagepod manager for a given
|
||||
* adapter if needed
|
||||
*/
|
||||
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
|
||||
{
|
||||
struct cxgb3i_ddp_info *ddp;
|
||||
|
||||
/* remove from the list */
|
||||
write_lock(&cxgb3i_ddp_rwlock);
|
||||
list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
|
||||
if (ddp->tdev == tdev) {
|
||||
list_del(&ddp->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
write_unlock(&cxgb3i_ddp_rwlock);
|
||||
|
||||
if (ddp)
|
||||
ddp_release(ddp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);
|
||||
|
||||
/**
|
||||
* cxgb3i_ddp_init_module - module init entry point
|
||||
* initialize any driver wide global data structures
|
||||
*/
|
||||
static int __init cxgb3i_ddp_init_module(void)
|
||||
{
|
||||
page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
|
||||
ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
|
||||
PAGE_SIZE, page_idx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_ddp_exit_module - module cleanup/exit entry point
|
||||
* go through the ddp list and release any resource held.
|
||||
*/
|
||||
static void __exit cxgb3i_ddp_exit_module(void)
|
||||
{
|
||||
struct cxgb3i_ddp_info *ddp;
|
||||
|
||||
/* release all ddp manager if there is any */
|
||||
write_lock(&cxgb3i_ddp_rwlock);
|
||||
list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
|
||||
list_del(&ddp->list);
|
||||
ddp_release(ddp);
|
||||
}
|
||||
write_unlock(&cxgb3i_ddp_rwlock);
|
||||
}
|
||||
|
||||
module_init(cxgb3i_ddp_init_module);
|
||||
module_exit(cxgb3i_ddp_exit_module);
|
||||
|
@@ -301,7 +301,9 @@ int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
|
||||
int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
|
||||
int hcrc, int dcrc, int reply);
|
||||
int cxgb3i_ddp_find_page_index(unsigned long pgsz);
|
||||
int cxgb3i_adapter_ddp_init(struct t3cdev *, struct cxgb3i_tag_format *,
|
||||
int cxgb3i_adapter_ddp_info(struct t3cdev *, struct cxgb3i_tag_format *,
|
||||
unsigned int *txsz, unsigned int *rxsz);
|
||||
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *);
|
||||
|
||||
void cxgb3i_ddp_init(struct t3cdev *);
|
||||
void cxgb3i_ddp_cleanup(struct t3cdev *);
|
||||
#endif
|
||||
|
@@ -12,8 +12,8 @@
|
||||
#include "cxgb3i.h"
|
||||
|
||||
#define DRV_MODULE_NAME "cxgb3i"
|
||||
#define DRV_MODULE_VERSION "1.0.1"
|
||||
#define DRV_MODULE_RELDATE "Jan. 2009"
|
||||
#define DRV_MODULE_VERSION "1.0.2"
|
||||
#define DRV_MODULE_RELDATE "Mar. 2009"
|
||||
|
||||
static char version[] =
|
||||
"Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
|
||||
@@ -26,6 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
||||
static void open_s3_dev(struct t3cdev *);
|
||||
static void close_s3_dev(struct t3cdev *);
|
||||
static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error);
|
||||
|
||||
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
|
||||
static struct cxgb3_client t3c_client = {
|
||||
@@ -33,6 +34,7 @@ static struct cxgb3_client t3c_client = {
|
||||
.handlers = cxgb3i_cpl_handlers,
|
||||
.add = open_s3_dev,
|
||||
.remove = close_s3_dev,
|
||||
.err_handler = s3_err_handler,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -48,8 +50,9 @@ static void open_s3_dev(struct t3cdev *t3dev)
|
||||
vers_printed = 1;
|
||||
}
|
||||
|
||||
cxgb3i_ddp_init(t3dev);
|
||||
cxgb3i_sdev_add(t3dev, &t3c_client);
|
||||
cxgb3i_adapter_add(t3dev);
|
||||
cxgb3i_adapter_open(t3dev);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -58,8 +61,28 @@ static void open_s3_dev(struct t3cdev *t3dev)
|
||||
*/
|
||||
static void close_s3_dev(struct t3cdev *t3dev)
|
||||
{
|
||||
cxgb3i_adapter_remove(t3dev);
|
||||
cxgb3i_adapter_close(t3dev);
|
||||
cxgb3i_sdev_remove(t3dev);
|
||||
cxgb3i_ddp_cleanup(t3dev);
|
||||
}
|
||||
|
||||
static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error)
|
||||
{
|
||||
struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
|
||||
|
||||
cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n",
|
||||
snic, tdev, status, error);
|
||||
if (!snic)
|
||||
return;
|
||||
|
||||
switch (status) {
|
||||
case OFFLOAD_STATUS_DOWN:
|
||||
snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
|
||||
break;
|
||||
case OFFLOAD_STATUS_UP:
|
||||
snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -53,36 +53,52 @@ static LIST_HEAD(cxgb3i_snic_list);
|
||||
static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
|
||||
|
||||
/**
|
||||
* cxgb3i_adapter_add - init a s3 adapter structure and any h/w settings
|
||||
* @t3dev: t3cdev adapter
|
||||
* return the resulting cxgb3i_adapter struct
|
||||
* cxgb3i_adpater_find_by_tdev - find the cxgb3i_adapter structure via t3cdev
|
||||
* @tdev: t3cdev pointer
|
||||
*/
|
||||
struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
|
||||
struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *tdev)
|
||||
{
|
||||
struct cxgb3i_adapter *snic;
|
||||
struct adapter *adapter = tdev2adap(t3dev);
|
||||
int i;
|
||||
|
||||
snic = kzalloc(sizeof(*snic), GFP_KERNEL);
|
||||
if (!snic) {
|
||||
cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
|
||||
return NULL;
|
||||
read_lock(&cxgb3i_snic_rwlock);
|
||||
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
|
||||
if (snic->tdev == tdev) {
|
||||
read_unlock(&cxgb3i_snic_rwlock);
|
||||
return snic;
|
||||
}
|
||||
}
|
||||
spin_lock_init(&snic->lock);
|
||||
read_unlock(&cxgb3i_snic_rwlock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int adapter_update(struct cxgb3i_adapter *snic)
|
||||
{
|
||||
cxgb3i_log_info("snic 0x%p, t3dev 0x%p, updating.\n",
|
||||
snic, snic->tdev);
|
||||
return cxgb3i_adapter_ddp_info(snic->tdev, &snic->tag_format,
|
||||
&snic->tx_max_size,
|
||||
&snic->rx_max_size);
|
||||
}
|
||||
|
||||
static int adapter_add(struct cxgb3i_adapter *snic)
|
||||
{
|
||||
struct t3cdev *t3dev = snic->tdev;
|
||||
struct adapter *adapter = tdev2adap(t3dev);
|
||||
int i, err;
|
||||
|
||||
snic->tdev = t3dev;
|
||||
snic->pdev = adapter->pdev;
|
||||
snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
|
||||
|
||||
if (cxgb3i_adapter_ddp_init(t3dev, &snic->tag_format,
|
||||
err = cxgb3i_adapter_ddp_info(t3dev, &snic->tag_format,
|
||||
&snic->tx_max_size,
|
||||
&snic->rx_max_size) < 0)
|
||||
goto free_snic;
|
||||
&snic->rx_max_size);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
for_each_port(adapter, i) {
|
||||
snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
|
||||
if (!snic->hba[i])
|
||||
goto ulp_cleanup;
|
||||
return -EINVAL;
|
||||
}
|
||||
snic->hba_cnt = adapter->params.nports;
|
||||
|
||||
@@ -91,46 +107,71 @@ struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
|
||||
list_add_tail(&snic->list_head, &cxgb3i_snic_list);
|
||||
write_unlock(&cxgb3i_snic_rwlock);
|
||||
|
||||
return snic;
|
||||
|
||||
ulp_cleanup:
|
||||
cxgb3i_adapter_ddp_cleanup(t3dev);
|
||||
free_snic:
|
||||
kfree(snic);
|
||||
return NULL;
|
||||
cxgb3i_log_info("t3dev 0x%p open, snic 0x%p, %u scsi hosts added.\n",
|
||||
t3dev, snic, snic->hba_cnt);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_adapter_remove - release the resources held and cleanup h/w settings
|
||||
* cxgb3i_adapter_open - init a s3 adapter structure and any h/w settings
|
||||
* @t3dev: t3cdev adapter
|
||||
*/
|
||||
void cxgb3i_adapter_remove(struct t3cdev *t3dev)
|
||||
void cxgb3i_adapter_open(struct t3cdev *t3dev)
|
||||
{
|
||||
struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
|
||||
int err;
|
||||
|
||||
if (snic)
|
||||
err = adapter_update(snic);
|
||||
else {
|
||||
snic = kzalloc(sizeof(*snic), GFP_KERNEL);
|
||||
if (snic) {
|
||||
spin_lock_init(&snic->lock);
|
||||
snic->tdev = t3dev;
|
||||
err = adapter_add(snic);
|
||||
} else
|
||||
err = -ENOMEM;
|
||||
}
|
||||
|
||||
if (err < 0) {
|
||||
cxgb3i_log_info("snic 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
|
||||
snic, snic ? snic->flags : 0, t3dev, err);
|
||||
if (snic) {
|
||||
snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
|
||||
cxgb3i_adapter_close(t3dev);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_adapter_close - release the resources held and cleanup h/w settings
|
||||
* @t3dev: t3cdev adapter
|
||||
*/
|
||||
void cxgb3i_adapter_close(struct t3cdev *t3dev)
|
||||
{
|
||||
struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
|
||||
int i;
|
||||
struct cxgb3i_adapter *snic;
|
||||
|
||||
if (!snic || snic->flags & CXGB3I_ADAPTER_FLAG_RESET) {
|
||||
cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, f 0x%x.\n",
|
||||
t3dev, snic, snic ? snic->flags : 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* remove from the list */
|
||||
write_lock(&cxgb3i_snic_rwlock);
|
||||
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
|
||||
if (snic->tdev == t3dev) {
|
||||
list_del(&snic->list_head);
|
||||
break;
|
||||
}
|
||||
}
|
||||
list_del(&snic->list_head);
|
||||
write_unlock(&cxgb3i_snic_rwlock);
|
||||
|
||||
if (snic) {
|
||||
for (i = 0; i < snic->hba_cnt; i++) {
|
||||
if (snic->hba[i]) {
|
||||
cxgb3i_hba_host_remove(snic->hba[i]);
|
||||
snic->hba[i] = NULL;
|
||||
}
|
||||
for (i = 0; i < snic->hba_cnt; i++) {
|
||||
if (snic->hba[i]) {
|
||||
cxgb3i_hba_host_remove(snic->hba[i]);
|
||||
snic->hba[i] = NULL;
|
||||
}
|
||||
|
||||
/* release ddp resources */
|
||||
cxgb3i_adapter_ddp_cleanup(snic->tdev);
|
||||
kfree(snic);
|
||||
}
|
||||
cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, %u scsi hosts removed.\n",
|
||||
t3dev, snic, snic->hba_cnt);
|
||||
kfree(snic);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -170,7 +211,8 @@ struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
|
||||
shost = iscsi_host_alloc(&cxgb3i_host_template,
|
||||
sizeof(struct cxgb3i_hba), 1);
|
||||
if (!shost) {
|
||||
cxgb3i_log_info("iscsi_host_alloc failed.\n");
|
||||
cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_alloc failed.\n",
|
||||
snic, ndev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -188,7 +230,8 @@ struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
|
||||
pci_dev_get(snic->pdev);
|
||||
err = iscsi_host_add(shost, &snic->pdev->dev);
|
||||
if (err) {
|
||||
cxgb3i_log_info("iscsi_host_add failed.\n");
|
||||
cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_add failed.\n",
|
||||
snic, ndev);
|
||||
goto pci_dev_put;
|
||||
}
|
||||
|
||||
|
@@ -94,29 +94,30 @@ static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
|
||||
if (!cdata)
|
||||
goto error_out;
|
||||
|
||||
if (c3cn->saddr.sin_port != 0) {
|
||||
idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
|
||||
if (idx < 0 || idx >= cxgb3_max_connect)
|
||||
return 0;
|
||||
if (!test_and_set_bit(idx, cdata->sport_map))
|
||||
return -EADDRINUSE;
|
||||
if (c3cn->saddr.sin_port) {
|
||||
cxgb3i_log_error("connect, sin_port NON-ZERO %u.\n",
|
||||
c3cn->saddr.sin_port);
|
||||
return -EADDRINUSE;
|
||||
}
|
||||
|
||||
/* the sport_map_next may not be accurate but that is okay, sport_map
|
||||
should be */
|
||||
start = idx = cdata->sport_map_next;
|
||||
spin_lock_bh(&cdata->lock);
|
||||
start = idx = cdata->sport_next;
|
||||
do {
|
||||
if (++idx >= cxgb3_max_connect)
|
||||
idx = 0;
|
||||
if (!(test_and_set_bit(idx, cdata->sport_map))) {
|
||||
if (!cdata->sport_conn[idx]) {
|
||||
c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
|
||||
cdata->sport_map_next = idx;
|
||||
cdata->sport_next = idx;
|
||||
cdata->sport_conn[idx] = c3cn;
|
||||
spin_unlock_bh(&cdata->lock);
|
||||
|
||||
c3cn_conn_debug("%s reserve port %u.\n",
|
||||
cdata->cdev->name,
|
||||
cxgb3_sport_base + idx);
|
||||
return 0;
|
||||
}
|
||||
} while (idx != start);
|
||||
spin_unlock_bh(&cdata->lock);
|
||||
|
||||
error_out:
|
||||
return -EADDRNOTAVAIL;
|
||||
@@ -124,15 +125,19 @@ error_out:
|
||||
|
||||
static void c3cn_put_port(struct s3_conn *c3cn)
|
||||
{
|
||||
struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
|
||||
if (!c3cn->cdev)
|
||||
return;
|
||||
|
||||
if (c3cn->saddr.sin_port) {
|
||||
struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
|
||||
int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
|
||||
|
||||
c3cn->saddr.sin_port = 0;
|
||||
if (idx < 0 || idx >= cxgb3_max_connect)
|
||||
return;
|
||||
clear_bit(idx, cdata->sport_map);
|
||||
spin_lock_bh(&cdata->lock);
|
||||
cdata->sport_conn[idx] = NULL;
|
||||
spin_unlock_bh(&cdata->lock);
|
||||
c3cn_conn_debug("%s, release port %u.\n",
|
||||
cdata->cdev->name, cxgb3_sport_base + idx);
|
||||
}
|
||||
@@ -1305,11 +1310,7 @@ static void c3cn_release_offload_resources(struct s3_conn *c3cn)
|
||||
struct t3cdev *cdev = c3cn->cdev;
|
||||
unsigned int tid = c3cn->tid;
|
||||
|
||||
if (!cdev)
|
||||
return;
|
||||
|
||||
c3cn->qset = 0;
|
||||
|
||||
c3cn_free_cpl_skbs(c3cn);
|
||||
|
||||
if (c3cn->wr_avail != c3cn->wr_max) {
|
||||
@@ -1317,18 +1318,22 @@ static void c3cn_release_offload_resources(struct s3_conn *c3cn)
|
||||
reset_wr_list(c3cn);
|
||||
}
|
||||
|
||||
if (c3cn->l2t) {
|
||||
l2t_release(L2DATA(cdev), c3cn->l2t);
|
||||
c3cn->l2t = NULL;
|
||||
}
|
||||
|
||||
if (c3cn->state == C3CN_STATE_CONNECTING) /* we have ATID */
|
||||
s3_free_atid(cdev, tid);
|
||||
else { /* we have TID */
|
||||
cxgb3_remove_tid(cdev, (void *)c3cn, tid);
|
||||
c3cn_put(c3cn);
|
||||
if (cdev) {
|
||||
if (c3cn->l2t) {
|
||||
l2t_release(L2DATA(cdev), c3cn->l2t);
|
||||
c3cn->l2t = NULL;
|
||||
}
|
||||
if (c3cn->state == C3CN_STATE_CONNECTING)
|
||||
/* we have ATID */
|
||||
s3_free_atid(cdev, tid);
|
||||
else {
|
||||
/* we have TID */
|
||||
cxgb3_remove_tid(cdev, (void *)c3cn, tid);
|
||||
c3cn_put(c3cn);
|
||||
}
|
||||
}
|
||||
|
||||
c3cn->dst_cache = NULL;
|
||||
c3cn->cdev = NULL;
|
||||
}
|
||||
|
||||
@@ -1417,17 +1422,18 @@ static void c3cn_active_close(struct s3_conn *c3cn)
|
||||
}
|
||||
|
||||
/**
|
||||
* cxgb3i_c3cn_release - close and release an iscsi tcp connection
|
||||
* cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
|
||||
* resource held
|
||||
* @c3cn: the iscsi tcp connection
|
||||
*/
|
||||
void cxgb3i_c3cn_release(struct s3_conn *c3cn)
|
||||
{
|
||||
c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
|
||||
c3cn, c3cn->state, c3cn->flags);
|
||||
if (likely(c3cn->state != C3CN_STATE_CONNECTING))
|
||||
c3cn_active_close(c3cn);
|
||||
else
|
||||
if (unlikely(c3cn->state == C3CN_STATE_CONNECTING))
|
||||
c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
|
||||
else if (likely(c3cn->state != C3CN_STATE_CLOSED))
|
||||
c3cn_active_close(c3cn);
|
||||
c3cn_put(c3cn);
|
||||
}
|
||||
|
||||
@@ -1656,7 +1662,6 @@ int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
|
||||
c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
|
||||
ip_rt_put(rt);
|
||||
c3cn_put_port(c3cn);
|
||||
c3cn->daddr.sin_port = 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1776,10 +1781,25 @@ out_err:
|
||||
static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
|
||||
{
|
||||
struct adap_ports *ports = &cdata->ports;
|
||||
struct s3_conn *c3cn;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cxgb3_max_connect; i++) {
|
||||
if (cdata->sport_conn[i]) {
|
||||
c3cn = cdata->sport_conn[i];
|
||||
cdata->sport_conn[i] = NULL;
|
||||
|
||||
spin_lock_bh(&c3cn->lock);
|
||||
c3cn->cdev = NULL;
|
||||
c3cn_set_flag(c3cn, C3CN_OFFLOAD_DOWN);
|
||||
c3cn_closed(c3cn);
|
||||
spin_unlock_bh(&c3cn->lock);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ports->nports; i++)
|
||||
NDEV2CDATA(ports->lldevs[i]) = NULL;
|
||||
|
||||
cxgb3i_free_big_mem(cdata);
|
||||
}
|
||||
|
||||
@@ -1821,21 +1841,27 @@ void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
|
||||
struct cxgb3i_sdev_data *cdata;
|
||||
struct ofld_page_info rx_page_info;
|
||||
unsigned int wr_len;
|
||||
int mapsize = DIV_ROUND_UP(cxgb3_max_connect,
|
||||
8 * sizeof(unsigned long));
|
||||
int mapsize = cxgb3_max_connect * sizeof(struct s3_conn *);
|
||||
int i;
|
||||
|
||||
cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
|
||||
if (!cdata)
|
||||
if (!cdata) {
|
||||
cxgb3i_log_warn("t3dev 0x%p, offload up, OOM %d.\n",
|
||||
cdev, mapsize);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
|
||||
cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
|
||||
cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
|
||||
cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
|
||||
cxgb3i_log_warn("t3dev 0x%p, offload up, ioctl failed.\n",
|
||||
cdev);
|
||||
goto free_cdata;
|
||||
}
|
||||
|
||||
s3_init_wr_tab(wr_len);
|
||||
|
||||
spin_lock_init(&cdata->lock);
|
||||
INIT_LIST_HEAD(&cdata->list);
|
||||
cdata->cdev = cdev;
|
||||
cdata->client = client;
|
||||
@@ -1847,6 +1873,7 @@ void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
|
||||
list_add_tail(&cdata->list, &cdata_list);
|
||||
write_unlock(&cdata_rwlock);
|
||||
|
||||
cxgb3i_log_info("t3dev 0x%p, offload up, added.\n", cdev);
|
||||
return;
|
||||
|
||||
free_cdata:
|
||||
@@ -1861,6 +1888,8 @@ void cxgb3i_sdev_remove(struct t3cdev *cdev)
|
||||
{
|
||||
struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
|
||||
|
||||
cxgb3i_log_info("t3dev 0x%p, offload down, remove.\n", cdev);
|
||||
|
||||
write_lock(&cdata_rwlock);
|
||||
list_del(&cdata->list);
|
||||
write_unlock(&cdata_rwlock);
|
||||
|
@@ -16,7 +16,7 @@
|
||||
#define _CXGB3I_OFFLOAD_H
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <net/tcp.h>
|
||||
#include <linux/in.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "adapter.h"
|
||||
@@ -135,11 +135,11 @@ enum c3cn_flags {
|
||||
C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
|
||||
C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
|
||||
C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
|
||||
C3CN_OFFLOAD_DOWN /* offload function off */
|
||||
};
|
||||
|
||||
/**
|
||||
* cxgb3i_sdev_data - Per adapter data.
|
||||
*
|
||||
* Linked off of each Ethernet device port on the adapter.
|
||||
* Also available via the t3cdev structure since we have pointers to our port
|
||||
* net_device's there ...
|
||||
@@ -148,16 +148,17 @@ enum c3cn_flags {
|
||||
* @cdev: t3cdev adapter
|
||||
* @client: CPL client pointer
|
||||
* @ports: array of adapter ports
|
||||
* @sport_map_next: next index into the port map
|
||||
* @sport_map: source port map
|
||||
* @sport_next: next port
|
||||
* @sport_conn: source port connection
|
||||
*/
|
||||
struct cxgb3i_sdev_data {
|
||||
struct list_head list;
|
||||
struct t3cdev *cdev;
|
||||
struct cxgb3_client *client;
|
||||
struct adap_ports ports;
|
||||
unsigned int sport_map_next;
|
||||
unsigned long sport_map[0];
|
||||
spinlock_t lock;
|
||||
unsigned int sport_next;
|
||||
struct s3_conn *sport_conn[0];
|
||||
};
|
||||
#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
|
||||
#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
|
||||
|
@@ -1,8 +1,2 @@
# $Id: Makefile

obj-$(CONFIG_FCOE) += fcoe.o

fcoe-y := \
libfcoe.o \
fcoe_sw.o \
fc_transport_fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
|
||||
|
@@ -1,443 +0,0 @@
|
||||
/*
|
||||
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Maintained at www.Open-FCoE.org
|
||||
*/
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <scsi/libfcoe.h>
|
||||
#include <scsi/fc_transport_fcoe.h>
|
||||
|
||||
/* internal fcoe transport */
|
||||
struct fcoe_transport_internal {
|
||||
struct fcoe_transport *t;
|
||||
struct net_device *netdev;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/* fcoe transports list and its lock */
|
||||
static LIST_HEAD(fcoe_transports);
|
||||
static DEFINE_MUTEX(fcoe_transports_lock);
|
||||
|
||||
/**
|
||||
* fcoe_transport_default() - Returns ptr to the default transport fcoe_sw
|
||||
*/
|
||||
struct fcoe_transport *fcoe_transport_default(void)
|
||||
{
|
||||
return &fcoe_sw_transport;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_to_pcidev() - get the pci dev from a netdev
|
||||
* @netdev: the netdev that pci dev will be retrived from
|
||||
*
|
||||
* Returns: NULL or the corrsponding pci_dev
|
||||
*/
|
||||
struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
|
||||
{
|
||||
if (!netdev->dev.parent)
|
||||
return NULL;
|
||||
return to_pci_dev(netdev->dev.parent);
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_device_lookup() - Lookup a transport
|
||||
* @netdev: the netdev the transport to be attached to
|
||||
*
|
||||
* This will look for existing offload driver, if not found, it falls back to
|
||||
* the default sw hba (fcoe_sw) as its fcoe transport.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static struct fcoe_transport_internal *
|
||||
fcoe_transport_device_lookup(struct fcoe_transport *t,
|
||||
struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport_internal *ti;
|
||||
|
||||
/* assign the transport to this device */
|
||||
mutex_lock(&t->devlock);
|
||||
list_for_each_entry(ti, &t->devlist, list) {
|
||||
if (ti->netdev == netdev) {
|
||||
mutex_unlock(&t->devlock);
|
||||
return ti;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&t->devlock);
|
||||
return NULL;
|
||||
}
|
||||
/**
|
||||
* fcoe_transport_device_add() - Assign a transport to a device
|
||||
* @netdev: the netdev the transport to be attached to
|
||||
*
|
||||
* This will look for existing offload driver, if not found, it falls back to
|
||||
* the default sw hba (fcoe_sw) as its fcoe transport.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_transport_device_add(struct fcoe_transport *t,
|
||||
struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport_internal *ti;
|
||||
|
||||
ti = fcoe_transport_device_lookup(t, netdev);
|
||||
if (ti) {
|
||||
printk(KERN_DEBUG "fcoe_transport_device_add:"
|
||||
"device %s is already added to transport %s\n",
|
||||
netdev->name, t->name);
|
||||
return -EEXIST;
|
||||
}
|
||||
/* allocate an internal struct to host the netdev and the list */
|
||||
ti = kzalloc(sizeof(*ti), GFP_KERNEL);
|
||||
if (!ti)
|
||||
return -ENOMEM;
|
||||
|
||||
ti->t = t;
|
||||
ti->netdev = netdev;
|
||||
INIT_LIST_HEAD(&ti->list);
|
||||
dev_hold(ti->netdev);
|
||||
|
||||
mutex_lock(&t->devlock);
|
||||
list_add(&ti->list, &t->devlist);
|
||||
mutex_unlock(&t->devlock);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_transport_device_add:"
|
||||
"device %s added to transport %s\n",
|
||||
netdev->name, t->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_device_remove() - Remove a device from its transport
|
||||
* @netdev: the netdev the transport to be attached to
|
||||
*
|
||||
* This removes the device from the transport so the given transport will
|
||||
* not manage this device any more
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_transport_device_remove(struct fcoe_transport *t,
|
||||
struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport_internal *ti;
|
||||
|
||||
ti = fcoe_transport_device_lookup(t, netdev);
|
||||
if (!ti) {
|
||||
printk(KERN_DEBUG "fcoe_transport_device_remove:"
|
||||
"device %s is not managed by transport %s\n",
|
||||
netdev->name, t->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
mutex_lock(&t->devlock);
|
||||
list_del(&ti->list);
|
||||
mutex_unlock(&t->devlock);
|
||||
printk(KERN_DEBUG "fcoe_transport_device_remove:"
|
||||
"device %s removed from transport %s\n",
|
||||
netdev->name, t->name);
|
||||
dev_put(ti->netdev);
|
||||
kfree(ti);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_device_remove_all() - Remove all from transport devlist
|
||||
*
|
||||
* This removes the device from the transport so the given transport will
|
||||
* not manage this device any more
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
|
||||
{
|
||||
struct fcoe_transport_internal *ti, *tmp;
|
||||
|
||||
mutex_lock(&t->devlock);
|
||||
list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
|
||||
list_del(&ti->list);
|
||||
kfree(ti);
|
||||
}
|
||||
mutex_unlock(&t->devlock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_match() - Use the bus device match function to match the hw
|
||||
* @t: The fcoe transport to check
|
||||
* @netdev: The netdev to match against
|
||||
*
|
||||
* This function is used to check if the given transport wants to manage the
|
||||
* input netdev. if the transports implements the match function, it will be
|
||||
* called, o.w. we just compare the pci vendor and device id.
|
||||
*
|
||||
* Returns: true for match up
|
||||
*/
|
||||
static bool fcoe_transport_match(struct fcoe_transport *t,
|
||||
struct net_device *netdev)
|
||||
{
|
||||
/* match transport by vendor and device id */
|
||||
struct pci_dev *pci;
|
||||
|
||||
pci = fcoe_transport_pcidev(netdev);
|
||||
|
||||
if (pci) {
|
||||
printk(KERN_DEBUG "fcoe_transport_match:"
|
||||
"%s:%x:%x -- %s:%x:%x\n",
|
||||
t->name, t->vendor, t->device,
|
||||
netdev->name, pci->vendor, pci->device);
|
||||
|
||||
/* if transport supports match */
|
||||
if (t->match)
|
||||
return t->match(netdev);
|
||||
|
||||
/* else just compare the vendor and device id: pci only */
|
||||
return (t->vendor == pci->vendor) && (t->device == pci->device);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_lookup() - Check if the transport is already registered
|
||||
* @t: the transport to be looked up
|
||||
*
|
||||
* This compares the parent device (pci) vendor and device id
|
||||
*
|
||||
* Returns: NULL if not found
|
||||
*
|
||||
* TODO: return default sw transport if no other transport is found
|
||||
*/
|
||||
static struct fcoe_transport *
|
||||
fcoe_transport_lookup(struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport *t;
|
||||
|
||||
mutex_lock(&fcoe_transports_lock);
|
||||
list_for_each_entry(t, &fcoe_transports, list) {
|
||||
if (fcoe_transport_match(t, netdev)) {
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
return t;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_transport_lookup:"
|
||||
"use default transport for %s\n", netdev->name);
|
||||
return fcoe_transport_default();
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_register() - Adds a fcoe transport to the fcoe transports list
|
||||
* @t: ptr to the fcoe transport to be added
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_transport_register(struct fcoe_transport *t)
|
||||
{
|
||||
struct fcoe_transport *tt;
|
||||
|
||||
/* TODO - add fcoe_transport specific initialization here */
|
||||
mutex_lock(&fcoe_transports_lock);
|
||||
list_for_each_entry(tt, &fcoe_transports, list) {
|
||||
if (tt == t) {
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
list_add_tail(&t->list, &fcoe_transports);
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_transport_register);
|
||||
|
||||
/**
|
||||
* fcoe_transport_unregister() - Remove the tranport fro the fcoe transports list
|
||||
* @t: ptr to the fcoe transport to be removed
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_transport_unregister(struct fcoe_transport *t)
|
||||
{
|
||||
struct fcoe_transport *tt, *tmp;
|
||||
|
||||
mutex_lock(&fcoe_transports_lock);
|
||||
list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
|
||||
if (tt == t) {
|
||||
list_del(&t->list);
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
fcoe_transport_device_remove_all(t);
|
||||
printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
|
||||
t->name);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
return -ENODEV;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
|
||||
|
||||
/**
|
||||
* fcoe_load_transport_driver() - Load an offload driver by alias name
|
||||
* @netdev: the target net device
|
||||
*
|
||||
* Requests for an offload driver module as the fcoe transport, if fails, it
|
||||
* falls back to use the SW HBA (fcoe_sw) as its transport
|
||||
*
|
||||
* TODO -
|
||||
* 1. supports only PCI device
|
||||
* 2. needs fix for VLAn and bonding
|
||||
* 3. pure hw fcoe hba may not have netdev
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_load_transport_driver(struct net_device *netdev)
|
||||
{
|
||||
struct pci_dev *pci;
|
||||
struct device *dev = netdev->dev.parent;
|
||||
|
||||
if (fcoe_transport_lookup(netdev)) {
|
||||
/* load default transport */
|
||||
printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
|
||||
netdev->name);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
pci = to_pci_dev(dev);
|
||||
if (dev->bus != &pci_bus_type) {
|
||||
printk(KERN_DEBUG "fcoe: support noly PCI device\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
|
||||
pci->vendor, pci->device);
|
||||
|
||||
return request_module("fcoe-pci-0x%04x-0x%04x",
|
||||
pci->vendor, pci->device);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
|
||||
|
||||
/**
|
||||
* fcoe_transport_attach() - Load transport to fcoe
|
||||
* @netdev: the netdev the transport to be attached to
|
||||
*
|
||||
* This will look for existing offload driver, if not found, it falls back to
|
||||
* the default sw hba (fcoe_sw) as its fcoe transport.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_transport_attach(struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport *t;
|
||||
|
||||
/* find the corresponding transport */
|
||||
t = fcoe_transport_lookup(netdev);
|
||||
if (!t) {
|
||||
printk(KERN_DEBUG "fcoe_transport_attach"
|
||||
":no transport for %s:use %s\n",
|
||||
netdev->name, t->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
/* add to the transport */
|
||||
if (fcoe_transport_device_add(t, netdev)) {
|
||||
printk(KERN_DEBUG "fcoe_transport_attach"
|
||||
":failed to add %s to tramsport %s\n",
|
||||
netdev->name, t->name);
|
||||
return -EIO;
|
||||
}
|
||||
/* transport create function */
|
||||
if (t->create)
|
||||
t->create(netdev);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
|
||||
t->name, netdev->name);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_transport_attach);
|
||||
|
||||
/**
|
||||
* fcoe_transport_release() - Unload transport from fcoe
|
||||
* @netdev: the net device on which fcoe is to be released
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_transport_release(struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport *t;
|
||||
|
||||
/* find the corresponding transport */
|
||||
t = fcoe_transport_lookup(netdev);
|
||||
if (!t) {
|
||||
printk(KERN_DEBUG "fcoe_transport_release:"
|
||||
"no transport for %s:use %s\n",
|
||||
netdev->name, t->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
/* remove the device from the transport */
|
||||
if (fcoe_transport_device_remove(t, netdev)) {
|
||||
printk(KERN_DEBUG "fcoe_transport_release:"
|
||||
"failed to add %s to tramsport %s\n",
|
||||
netdev->name, t->name);
|
||||
return -EIO;
|
||||
}
|
||||
/* transport destroy function */
|
||||
if (t->destroy)
|
||||
t->destroy(netdev);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_transport_release:"
|
||||
"device %s dettached from transport %s\n",
|
||||
netdev->name, t->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_transport_release);
|
||||
|
||||
/**
|
||||
* fcoe_transport_init() - Initializes fcoe transport layer
|
||||
*
|
||||
* This prepares for the fcoe transport layer
|
||||
*
|
||||
* Returns: none
|
||||
*/
|
||||
int __init fcoe_transport_init(void)
|
||||
{
|
||||
INIT_LIST_HEAD(&fcoe_transports);
|
||||
mutex_init(&fcoe_transports_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_exit() - Cleans up the fcoe transport layer
|
||||
*
|
||||
* This cleans up the fcoe transport layer. removing any transport on the list,
|
||||
* note that the transport destroy func is not called here.
|
||||
*
|
||||
* Returns: none
|
||||
*/
|
||||
int __exit fcoe_transport_exit(void)
|
||||
{
|
||||
struct fcoe_transport *t, *tmp;
|
||||
|
||||
mutex_lock(&fcoe_transports_lock);
|
||||
list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
|
||||
list_del(&t->list);
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
fcoe_transport_device_remove_all(t);
|
||||
mutex_lock(&fcoe_transports_lock);
|
||||
}
|
||||
mutex_unlock(&fcoe_transports_lock);
|
||||
return 0;
|
||||
}
|
drivers/scsi/fcoe/fcoe.c (new file, 1878 lines): diff suppressed because it is too large.

drivers/scsi/fcoe/fcoe.h (new file, 75 lines):
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright(c) 2009 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Maintained at www.Open-FCoE.org
|
||||
*/
|
||||
|
||||
#ifndef _FCOE_H_
|
||||
#define _FCOE_H_
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#define FCOE_MAX_QUEUE_DEPTH 256
|
||||
#define FCOE_LOW_QUEUE_DEPTH 32
|
||||
|
||||
#define FCOE_WORD_TO_BYTE 4
|
||||
|
||||
#define FCOE_VERSION "0.1"
|
||||
#define FCOE_NAME "fcoe"
|
||||
#define FCOE_VENDOR "Open-FCoE.org"
|
||||
|
||||
#define FCOE_MAX_LUN 255
|
||||
#define FCOE_MAX_FCP_TARGET 256
|
||||
|
||||
#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
|
||||
|
||||
#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
|
||||
#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
|
||||
|
||||
/*
|
||||
* this percpu struct for fcoe
|
||||
*/
|
||||
struct fcoe_percpu_s {
|
||||
struct task_struct *thread;
|
||||
struct sk_buff_head fcoe_rx_list;
|
||||
struct page *crc_eof_page;
|
||||
int crc_eof_offset;
|
||||
};
|
||||
|
||||
/*
|
||||
* the fcoe sw transport private data
|
||||
*/
|
||||
struct fcoe_softc {
|
||||
struct list_head list;
|
||||
struct net_device *real_dev;
|
||||
struct net_device *phys_dev; /* device with ethtool_ops */
|
||||
struct packet_type fcoe_packet_type;
|
||||
struct packet_type fip_packet_type;
|
||||
struct sk_buff_head fcoe_pending_queue;
|
||||
u8 fcoe_pending_queue_active;
|
||||
struct fcoe_ctlr ctlr;
|
||||
};
|
||||
|
||||
#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr)
|
||||
|
||||
static inline struct net_device *fcoe_netdev(
|
||||
const struct fc_lport *lp)
|
||||
{
|
||||
return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
|
||||
}
|
||||
|
||||
#endif /* _FCOE_H_ */
|
@@ -1,561 +0,0 @@
|
||||
/*
|
||||
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Maintained at www.Open-FCoE.org
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/rtnetlink.h>
|
||||
|
||||
#include <scsi/fc/fc_els.h>
|
||||
#include <scsi/fc/fc_encaps.h>
|
||||
#include <scsi/fc/fc_fs.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
|
||||
#include <scsi/libfc.h>
|
||||
#include <scsi/libfcoe.h>
|
||||
#include <scsi/fc_transport_fcoe.h>
|
||||
|
||||
#define FCOE_SW_VERSION "0.1"
|
||||
#define FCOE_SW_NAME "fcoesw"
|
||||
#define FCOE_SW_VENDOR "Open-FCoE.org"
|
||||
|
||||
#define FCOE_MAX_LUN 255
|
||||
#define FCOE_MAX_FCP_TARGET 256
|
||||
|
||||
#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
|
||||
|
||||
#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
|
||||
#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
|
||||
|
||||
static struct scsi_transport_template *scsi_transport_fcoe_sw;
|
||||
|
||||
struct fc_function_template fcoe_sw_transport_function = {
|
||||
.show_host_node_name = 1,
|
||||
.show_host_port_name = 1,
|
||||
.show_host_supported_classes = 1,
|
||||
.show_host_supported_fc4s = 1,
|
||||
.show_host_active_fc4s = 1,
|
||||
.show_host_maxframe_size = 1,
|
||||
|
||||
.show_host_port_id = 1,
|
||||
.show_host_supported_speeds = 1,
|
||||
.get_host_speed = fc_get_host_speed,
|
||||
.show_host_speed = 1,
|
||||
.show_host_port_type = 1,
|
||||
.get_host_port_state = fc_get_host_port_state,
|
||||
.show_host_port_state = 1,
|
||||
.show_host_symbolic_name = 1,
|
||||
|
||||
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
|
||||
.show_rport_maxframe_size = 1,
|
||||
.show_rport_supported_classes = 1,
|
||||
|
||||
.show_host_fabric_name = 1,
|
||||
.show_starget_node_name = 1,
|
||||
.show_starget_port_name = 1,
|
||||
.show_starget_port_id = 1,
|
||||
.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
|
||||
.show_rport_dev_loss_tmo = 1,
|
||||
.get_fc_host_stats = fc_get_host_stats,
|
||||
.issue_fc_host_lip = fcoe_reset,
|
||||
|
||||
.terminate_rport_io = fc_rport_terminate_io,
|
||||
};
|
||||
|
||||
static struct scsi_host_template fcoe_sw_shost_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = "FCoE Driver",
|
||||
.proc_name = FCOE_SW_NAME,
|
||||
.queuecommand = fc_queuecommand,
|
||||
.eh_abort_handler = fc_eh_abort,
|
||||
.eh_device_reset_handler = fc_eh_device_reset,
|
||||
.eh_host_reset_handler = fc_eh_host_reset,
|
||||
.slave_alloc = fc_slave_alloc,
|
||||
.change_queue_depth = fc_change_queue_depth,
|
||||
.change_queue_type = fc_change_queue_type,
|
||||
.this_id = -1,
|
||||
.cmd_per_lun = 32,
|
||||
.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.sg_tablesize = SG_ALL,
|
||||
.max_sectors = 0xffff,
|
||||
};
|
||||
|
||||
/**
|
||||
* fcoe_sw_lport_config() - sets up the fc_lport
|
||||
* @lp: ptr to the fc_lport
|
||||
* @shost: ptr to the parent scsi host
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_sw_lport_config(struct fc_lport *lp)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
lp->link_up = 0;
|
||||
lp->qfull = 0;
|
||||
lp->max_retry_count = 3;
|
||||
lp->e_d_tov = 2 * 1000; /* FC-FS default */
|
||||
lp->r_a_tov = 2 * 2 * 1000;
|
||||
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
|
||||
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
|
||||
|
||||
/*
|
||||
* allocate per cpu stats block
|
||||
*/
|
||||
for_each_online_cpu(i)
|
||||
lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
|
||||
GFP_KERNEL);
|
||||
|
||||
/* lport fc_lport related configuration */
|
||||
fc_lport_config(lp);
|
||||
|
||||
/* offload related configuration */
|
||||
lp->crc_offload = 0;
|
||||
lp->seq_offload = 0;
|
||||
lp->lro_enabled = 0;
|
||||
lp->lro_xid = 0;
|
||||
lp->lso_max = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_sw_netdev_config() - Set up netdev for SW FCoE
|
||||
* @lp : ptr to the fc_lport
|
||||
* @netdev : ptr to the associated netdevice struct
|
||||
*
|
||||
* Must be called after fcoe_sw_lport_config() as it will use lport mutex
|
||||
*
|
||||
* Returns : 0 for success
|
||||
*/
|
||||
static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
|
||||
{
|
||||
u32 mfs;
|
||||
u64 wwnn, wwpn;
|
||||
struct fcoe_softc *fc;
|
||||
u8 flogi_maddr[ETH_ALEN];
|
||||
|
||||
/* Setup lport private data to point to fcoe softc */
|
||||
fc = lport_priv(lp);
|
||||
fc->lp = lp;
|
||||
fc->real_dev = netdev;
|
||||
fc->phys_dev = netdev;
|
||||
|
||||
/* Require support for get_pauseparam ethtool op. */
|
||||
if (netdev->priv_flags & IFF_802_1Q_VLAN)
|
||||
fc->phys_dev = vlan_dev_real_dev(netdev);
|
||||
|
||||
/* Do not support for bonding device */
|
||||
if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
|
||||
(fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
|
||||
(fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine max frame size based on underlying device and optional
|
||||
* user-configured limit. If the MFS is too low, fcoe_link_ok()
|
||||
* will return 0, so do this first.
|
||||
*/
|
||||
mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
|
||||
sizeof(struct fcoe_crc_eof));
|
||||
if (fc_set_mfs(lp, mfs))
|
||||
return -EINVAL;
|
||||
|
||||
if (!fcoe_link_ok(lp))
|
||||
lp->link_up = 1;
|
||||
|
||||
/* offload features support */
|
||||
if (fc->real_dev->features & NETIF_F_SG)
|
||||
lp->sg_supp = 1;
|
||||
|
||||
#ifdef NETIF_F_FCOE_CRC
|
||||
if (netdev->features & NETIF_F_FCOE_CRC) {
|
||||
lp->crc_offload = 1;
|
||||
printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
|
||||
netdev->name);
|
||||
}
|
||||
#endif
|
||||
#ifdef NETIF_F_FSO
|
||||
if (netdev->features & NETIF_F_FSO) {
|
||||
lp->seq_offload = 1;
|
||||
lp->lso_max = netdev->gso_max_size;
|
||||
printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
|
||||
netdev->name, lp->lso_max);
|
||||
}
|
||||
#endif
|
||||
if (netdev->fcoe_ddp_xid) {
|
||||
lp->lro_enabled = 1;
|
||||
lp->lro_xid = netdev->fcoe_ddp_xid;
|
||||
printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
|
||||
netdev->name, lp->lro_xid);
|
||||
}
|
||||
skb_queue_head_init(&fc->fcoe_pending_queue);
|
||||
fc->fcoe_pending_queue_active = 0;
|
||||
|
||||
/* setup Source Mac Address */
|
||||
memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
|
||||
fc->real_dev->addr_len);
|
||||
|
||||
wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
|
||||
fc_set_wwnn(lp, wwnn);
|
||||
/* XXX - 3rd arg needs to be vlan id */
|
||||
wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
|
||||
fc_set_wwpn(lp, wwpn);
|
||||
|
||||
/*
|
||||
* Add FCoE MAC address as second unicast MAC address
|
||||
* or enter promiscuous mode if not capable of listening
|
||||
* for multiple unicast MACs.
|
||||
*/
|
||||
rtnl_lock();
|
||||
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
|
||||
dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
|
||||
rtnl_unlock();
|
||||
|
||||
/*
|
||||
* setup the receive function from ethernet driver
|
||||
* on the ethertype for the given device
|
||||
*/
|
||||
fc->fcoe_packet_type.func = fcoe_rcv;
|
||||
fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
|
||||
fc->fcoe_packet_type.dev = fc->real_dev;
|
||||
dev_add_pack(&fc->fcoe_packet_type);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_sw_shost_config() - Sets up fc_lport->host
|
||||
* @lp : ptr to the fc_lport
|
||||
* @shost : ptr to the associated scsi host
|
||||
* @dev : device associated to scsi host
|
||||
*
|
||||
* Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
|
||||
*
|
||||
* Returns : 0 for success
|
||||
*/
|
||||
static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
|
||||
struct device *dev)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
/* lport scsi host config */
|
||||
lp->host = shost;
|
||||
|
||||
lp->host->max_lun = FCOE_MAX_LUN;
|
||||
lp->host->max_id = FCOE_MAX_FCP_TARGET;
|
||||
lp->host->max_channel = 0;
|
||||
lp->host->transportt = scsi_transport_fcoe_sw;
|
||||
|
||||
/* add the new host to the SCSI-ml */
|
||||
rc = scsi_add_host(lp->host, dev);
|
||||
if (rc) {
|
||||
FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
|
||||
return rc;
|
||||
}
|
||||
sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
|
||||
FCOE_SW_NAME, FCOE_SW_VERSION,
|
||||
fcoe_netdev(lp)->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_sw_em_config() - allocates em for this lport
|
||||
* @lp: the port that em is to allocated for
|
||||
*
|
||||
* Returns : 0 on success
|
||||
*/
|
||||
static inline int fcoe_sw_em_config(struct fc_lport *lp)
|
||||
{
|
||||
BUG_ON(lp->emp);
|
||||
|
||||
lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
|
||||
FCOE_MIN_XID, FCOE_MAX_XID);
|
||||
if (!lp->emp)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_sw_destroy() - FCoE software HBA tear-down function
|
||||
* @netdev: ptr to the associated net_device
|
||||
*
|
||||
* Returns: 0 if link is OK for use by FCoE.
|
||||
*/
|
||||
static int fcoe_sw_destroy(struct net_device *netdev)
|
||||
{
|
||||
int cpu;
|
||||
struct fc_lport *lp = NULL;
|
||||
struct fcoe_softc *fc;
|
||||
u8 flogi_maddr[ETH_ALEN];
|
||||
|
||||
BUG_ON(!netdev);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
|
||||
netdev->name);
|
||||
|
||||
lp = fcoe_hostlist_lookup(netdev);
|
||||
if (!lp)
|
||||
return -ENODEV;
|
||||
|
||||
fc = lport_priv(lp);
|
||||
|
||||
/* Logout of the fabric */
|
||||
fc_fabric_logoff(lp);
|
||||
|
||||
/* Remove the instance from fcoe's list */
|
||||
fcoe_hostlist_remove(lp);
|
||||
|
||||
/* Don't listen for Ethernet packets anymore */
|
||||
dev_remove_pack(&fc->fcoe_packet_type);
|
||||
|
||||
/* Cleanup the fc_lport */
|
||||
fc_lport_destroy(lp);
|
||||
fc_fcp_destroy(lp);
|
||||
|
||||
/* Detach from the scsi-ml */
|
||||
fc_remove_host(lp->host);
|
||||
scsi_remove_host(lp->host);
|
||||
|
||||
/* There are no more rports or I/O, free the EM */
|
||||
if (lp->emp)
|
||||
fc_exch_mgr_free(lp->emp);
|
||||
|
||||
/* Delete secondary MAC addresses */
|
||||
rtnl_lock();
|
||||
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
|
||||
dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
|
||||
if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
|
||||
dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
|
||||
rtnl_unlock();
|
||||
|
||||
/* Free the per-CPU revieve threads */
|
||||
fcoe_percpu_clean(lp);
|
||||
|
||||
/* Free existing skbs */
|
||||
fcoe_clean_pending_queue(lp);
|
||||
|
||||
/* Free memory used by statistical counters */
|
||||
for_each_online_cpu(cpu)
|
||||
kfree(lp->dev_stats[cpu]);
|
||||
|
||||
/* Release the net_device and Scsi_Host */
|
||||
dev_put(fc->real_dev);
|
||||
scsi_host_put(lp->host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* fcoe_sw_ddp_setup - calls LLD's ddp_setup through net_device
|
||||
* @lp: the corresponding fc_lport
|
||||
* @xid: the exchange id for this ddp transfer
|
||||
* @sgl: the scatterlist describing this transfer
|
||||
* @sgc: number of sg items
|
||||
*
|
||||
* Returns : 0 no ddp
|
||||
*/
|
||||
static int fcoe_sw_ddp_setup(struct fc_lport *lp, u16 xid,
|
||||
struct scatterlist *sgl, unsigned int sgc)
|
||||
{
|
||||
struct net_device *n = fcoe_netdev(lp);
|
||||
|
||||
if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
|
||||
return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* fcoe_sw_ddp_done - calls LLD's ddp_done through net_device
|
||||
* @lp: the corresponding fc_lport
|
||||
* @xid: the exchange id for this ddp transfer
|
||||
*
|
||||
* Returns : the length of data that have been completed by ddp
|
||||
*/
|
||||
static int fcoe_sw_ddp_done(struct fc_lport *lp, u16 xid)
|
||||
{
|
||||
struct net_device *n = fcoe_netdev(lp);
|
||||
|
||||
if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
|
||||
return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
|
||||
.frame_send = fcoe_xmit,
|
||||
.ddp_setup = fcoe_sw_ddp_setup,
|
||||
.ddp_done = fcoe_sw_ddp_done,
|
||||
};
|
||||
|
||||
/**
|
||||
* fcoe_sw_create() - this function creates the fcoe interface
|
||||
* @netdev: pointer the associated netdevice
|
||||
*
|
||||
* Creates fc_lport struct and scsi_host for lport, configures lport
|
||||
* and starts fabric login.
|
||||
*
|
||||
* Returns : 0 on success
|
||||
*/
|
||||
static int fcoe_sw_create(struct net_device *netdev)
|
||||
{
|
||||
int rc;
|
||||
struct fc_lport *lp = NULL;
|
||||
struct fcoe_softc *fc;
|
||||
struct Scsi_Host *shost;
|
||||
|
||||
BUG_ON(!netdev);
|
||||
|
||||
printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
|
||||
netdev->name);
|
||||
|
||||
lp = fcoe_hostlist_lookup(netdev);
|
||||
if (lp)
|
||||
return -EEXIST;
|
||||
|
||||
shost = fcoe_host_alloc(&fcoe_sw_shost_template,
|
||||
sizeof(struct fcoe_softc));
|
||||
if (!shost) {
|
||||
FC_DBG("Could not allocate host structure\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
lp = shost_priv(shost);
|
||||
fc = lport_priv(lp);
|
||||
|
||||
/* configure fc_lport, e.g., em */
|
||||
rc = fcoe_sw_lport_config(lp);
|
||||
if (rc) {
|
||||
FC_DBG("Could not configure lport\n");
|
||||
goto out_host_put;
|
||||
}
|
||||
|
||||
/* configure lport network properties */
|
||||
rc = fcoe_sw_netdev_config(lp, netdev);
|
||||
if (rc) {
|
||||
FC_DBG("Could not configure netdev for lport\n");
|
||||
goto out_host_put;
|
||||
}
|
||||
|
||||
/* configure lport scsi host properties */
|
||||
rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
|
||||
if (rc) {
|
||||
FC_DBG("Could not configure shost for lport\n");
|
||||
goto out_host_put;
|
||||
}
|
||||
|
||||
/* lport exch manager allocation */
|
||||
rc = fcoe_sw_em_config(lp);
|
||||
if (rc) {
|
||||
FC_DBG("Could not configure em for lport\n");
|
||||
goto out_host_put;
|
||||
}
|
||||
|
||||
/* Initialize the library */
|
||||
rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
|
||||
if (rc) {
|
||||
FC_DBG("Could not configure libfc for lport!\n");
|
||||
goto out_lp_destroy;
|
||||
}
|
||||
|
||||
/* add to lports list */
|
||||
fcoe_hostlist_add(lp);
|
||||
|
||||
lp->boot_time = jiffies;
|
||||
|
||||
fc_fabric_login(lp);
|
||||
|
||||
dev_hold(netdev);
|
||||
|
||||
return rc;
|
||||
|
||||
out_lp_destroy:
|
||||
fc_exch_mgr_free(lp->emp); /* Free the EM */
|
||||
out_host_put:
|
||||
scsi_host_put(lp->host);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_sw_match() - The FCoE SW transport match function
|
||||
*
|
||||
* Returns : false always
|
||||
*/
|
||||
static bool fcoe_sw_match(struct net_device *netdev)
|
||||
{
|
||||
/* FIXME - for sw transport, always return false */
|
||||
return false;
|
||||
}
|
||||
|
||||
/* the sw hba fcoe transport */
|
||||
struct fcoe_transport fcoe_sw_transport = {
|
||||
.name = "fcoesw",
|
||||
.create = fcoe_sw_create,
|
||||
.destroy = fcoe_sw_destroy,
|
||||
.match = fcoe_sw_match,
|
||||
.vendor = 0x0,
|
||||
.device = 0xffff,
|
||||
};
|
||||
|
||||
/**
|
||||
* fcoe_sw_init() - Registers fcoe_sw_transport
|
||||
*
|
||||
* Returns : 0 on success
|
||||
*/
|
||||
int __init fcoe_sw_init(void)
|
||||
{
|
||||
/* attach to scsi transport */
|
||||
scsi_transport_fcoe_sw =
|
||||
fc_attach_transport(&fcoe_sw_transport_function);
|
||||
|
||||
if (!scsi_transport_fcoe_sw) {
|
||||
printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
mutex_init(&fcoe_sw_transport.devlock);
|
||||
INIT_LIST_HEAD(&fcoe_sw_transport.devlist);
|
||||
|
||||
/* register sw transport */
|
||||
fcoe_transport_register(&fcoe_sw_transport);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_sw_exit() - Unregisters fcoe_sw_transport
|
||||
*
|
||||
* Returns : 0 on success
|
||||
*/
|
||||
int __exit fcoe_sw_exit(void)
|
||||
{
|
||||
/* dettach the transport */
|
||||
fc_release_transport(scsi_transport_fcoe_sw);
|
||||
fcoe_transport_unregister(&fcoe_sw_transport);
|
||||
return 0;
|
||||
}
|
[file diff suppressed because it is too large]
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
"[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
"[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
@@ -640,6 +640,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
|
||||
|
||||
ibmvfc_dbg(vhost, "Releasing CRQ\n");
|
||||
free_irq(vdev->irq, vhost);
|
||||
tasklet_kill(&vhost->tasklet);
|
||||
do {
|
||||
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
|
||||
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
|
||||
@@ -2699,6 +2700,25 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
|
||||
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
|
||||
{
|
||||
struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(vhost->host->host_lock, flags);
|
||||
vio_disable_interrupts(to_vio_dev(vhost->dev));
|
||||
tasklet_schedule(&vhost->tasklet);
|
||||
spin_unlock_irqrestore(vhost->host->host_lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/**
|
||||
* ibmvfc_tasklet - Interrupt handler tasklet
|
||||
* @data: ibmvfc host struct
|
||||
*
|
||||
* Returns:
|
||||
* Nothing
|
||||
**/
|
||||
static void ibmvfc_tasklet(void *data)
|
||||
{
|
||||
struct ibmvfc_host *vhost = data;
|
||||
struct vio_dev *vdev = to_vio_dev(vhost->dev);
|
||||
struct ibmvfc_crq *crq;
|
||||
struct ibmvfc_async_crq *async;
|
||||
@@ -2706,7 +2726,6 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
|
||||
int done = 0;
|
||||
|
||||
spin_lock_irqsave(vhost->host->host_lock, flags);
|
||||
vio_disable_interrupts(to_vio_dev(vhost->dev));
|
||||
while (!done) {
|
||||
/* Pull all the valid messages off the CRQ */
|
||||
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
|
||||
@@ -2734,7 +2753,6 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
}

spin_unlock_irqrestore(vhost->host->host_lock, flags);
return IRQ_HANDLED;
}

/**
@@ -3105,6 +3123,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)

vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
del_timer(&tgt->timer);

switch (status) {
case IBMVFC_MAD_SUCCESS:
@@ -3160,10 +3179,90 @@ static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
mad->iu.rsp.len = sizeof(mad->fc_iu.response);
}

/**
* ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
* @evt: ibmvfc event struct
*
* Just cleanup this event struct. Everything else is handled by
* the ADISC completion handler. If the ADISC never actually comes
* back, we still have the timer running on the ADISC event struct
* which will fire and cause the CRQ to get reset.
*
**/
static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_target *tgt = evt->tgt;

tgt_dbg(tgt, "ADISC cancel complete\n");
vhost->abort_threads--;
ibmvfc_free_event(evt);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}

/**
* ibmvfc_adisc_timeout - Handle an ADISC timeout
* @tgt: ibmvfc target struct
*
* If an ADISC times out, send a cancel. If the cancel times
* out, reset the CRQ. When the ADISC comes back as cancelled,
* log back into the target.
**/
static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
{
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
unsigned long flags;
int rc;

tgt_dbg(tgt, "ADISC timeout\n");
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->abort_threads >= disc_threads ||
tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
vhost->state != IBMVFC_INITIALIZING ||
vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
}

vhost->abort_threads++;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

evt->tgt = tgt;
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
tmf->common.version = 1;
tmf->common.opcode = IBMVFC_TMF_MAD;
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = tgt->scsi_id;
tmf->cancel_key = tgt->cancel_key;

rc = ibmvfc_send_event(evt, vhost, default_timeout);

if (rc) {
tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
vhost->abort_threads--;
kref_put(&tgt->kref, ibmvfc_release_tgt);
__ibmvfc_reset_host(vhost);
} else
tgt_dbg(tgt, "Attempting to cancel ADISC\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
* ibmvfc_tgt_adisc - Initiate an ADISC for specified target
* @tgt: ibmvfc target struct
*
* When sending an ADISC we end up with two timers running. The
* first timer is the timer in the ibmvfc target struct. If this
* fires, we send a cancel to the target. The second timer is the
* timer on the ibmvfc event for the ADISC, which is longer. If that
* fires, it means the ADISC timed out and our attempt to cancel it
* also failed, so we need to reset the CRQ.
**/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
@@ -3184,6 +3283,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
mad = &evt->iu.passthru;
mad->iu.flags = IBMVFC_FC_ELS;
mad->iu.scsi_id = tgt->scsi_id;
mad->iu.cancel_key = tgt->cancel_key;

mad->fc_iu.payload[0] = IBMVFC_ADISC;
memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
@@ -3192,9 +3292,19 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
sizeof(vhost->login_buf->resp.node_name));
mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;

if (timer_pending(&tgt->timer))
mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
else {
tgt->timer.data = (unsigned long) tgt;
tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
add_timer(&tgt->timer);
}

ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
vhost->discovery_threads--;
del_timer(&tgt->timer);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
@@ -3322,6 +3432,8 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
tgt->new_scsi_id = scsi_id;
tgt->vhost = vhost;
tgt->need_login = 1;
tgt->cancel_key = vhost->task_set++;
init_timer(&tgt->timer);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
@@ -3716,6 +3828,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
}
@@ -3859,6 +3972,8 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)

retrc = 0;

tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);

if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
goto req_irq_failed;
@@ -3874,6 +3989,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
return retrc;

req_irq_failed:
tasklet_kill(&vhost->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -4040,6 +4156,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
vhost->dev = dev;
vhost->partition_number = -1;
vhost->log_level = log_level;
vhost->task_set = 1;
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
@@ -4174,6 +4291,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
.show_host_supported_classes = 1,
.show_host_port_type = 1,
.show_host_port_id = 1,
.show_host_maxframe_size = 1,

.get_host_port_state = ibmvfc_get_host_port_state,
.show_host_port_state = 1,
|
@@ -29,10 +29,14 @@
#include "viosrp.h"

#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.4"
#define IBMVFC_DRIVER_DATE "(November 14, 2008)"
#define IBMVFC_DRIVER_VERSION "1.0.5"
#define IBMVFC_DRIVER_DATE "(March 19, 2009)"

#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
#define IBMVFC_ADISC_TIMEOUT 15
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
#define IBMVFC_MAX_REQUESTS_DEFAULT 100

@@ -53,9 +57,9 @@
* Ensure we have resources for ERP and initialization:
* 1 for ERP
* 1 for initialization
* 1 for each discovery thread
* 2 for each discovery thread
*/
#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + disc_threads)
#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2))

#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -585,10 +589,12 @@ struct ibmvfc_target {
enum ibmvfc_target_action action;
int need_login;
int init_retries;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
struct fc_rport_identifiers ids;
void (*job_step) (struct ibmvfc_target *);
struct timer_list timer;
struct kref kref;
};

@@ -672,6 +678,7 @@ struct ibmvfc_host {
int task_set;
int init_retries;
int discovery_threads;
int abort_threads;
int client_migrated;
int reinit;
int delay_init;
@@ -684,6 +691,7 @@ struct ibmvfc_host {
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
struct tasklet_struct tasklet;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
};
|
@@ -41,7 +41,7 @@

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL");
MODULE_LICENSE("GPL v2");

static int fc_fcp_debug;

@@ -407,10 +407,12 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)

if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
stats = lp->dev_stats[smp_processor_id()];
stats = fc_lport_get_stats(lp);
stats->ErrorFrames++;
/* FIXME - per cpu count, not total count! */
if (stats->InvalidCRCCount++ < 5)
FC_DBG("CRC error on data frame\n");
printk(KERN_WARNING "CRC error on data frame for port (%6x)\n",
fc_host_port_id(lp->host));
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@@ -1752,7 +1754,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
/*
* setup the data direction
*/
stats = lp->dev_stats[smp_processor_id()];
stats = fc_lport_get_stats(lp);
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
stats->InputRequests++;
|
@@ -267,10 +267,10 @@ EXPORT_SYMBOL(fc_get_host_speed);

struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
int i;
struct fc_host_statistics *fcoe_stats;
struct fc_lport *lp = shost_priv(shost);
struct timespec v0, v1;
unsigned int cpu;

fcoe_stats = &lp->host_stats;
memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
@@ -279,10 +279,11 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
jiffies_to_timespec(lp->boot_time, &v1);
fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

for_each_online_cpu(i) {
struct fcoe_dev_stats *stats = lp->dev_stats[i];
if (stats == NULL)
continue;
for_each_possible_cpu(cpu) {
struct fcoe_dev_stats *stats;

stats = per_cpu_ptr(lp->dev_stats, cpu);

fcoe_stats->tx_frames += stats->TxFrames;
fcoe_stats->tx_words += stats->TxWords;
fcoe_stats->rx_frames += stats->RxFrames;
|
@@ -1999,8 +1999,10 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)

q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
GFP_KERNEL, NULL);
if (q->queue == ERR_PTR(-ENOMEM))
if (IS_ERR(q->queue)) {
q->queue = NULL;
goto enomem;
}

for (i = 0; i < max; i++) {
q->pool[i] = kzalloc(item_size, GFP_KERNEL);
|
@@ -338,20 +338,6 @@ struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
}
EXPORT_SYMBOL(osd_start_request);

/*
* If osd_finalize_request() was called but the request was not executed through
* the block layer, then we must release BIOs.
*/
static void _abort_unexecuted_bios(struct request *rq)
{
struct bio *bio;

while ((bio = rq->bio) != NULL) {
rq->bio = bio->bi_next;
bio_endio(bio, 0);
}
}

static void _osd_free_seg(struct osd_request *or __unused,
struct _osd_req_data_segment *seg)
{
@@ -363,9 +349,30 @@ static void _osd_free_seg(struct osd_request *or __unused,
seg->alloc_size = 0;
}

static void _put_request(struct request *rq , bool is_async)
{
if (is_async) {
WARN_ON(rq->bio);
__blk_put_request(rq->q, rq);
} else {
/*
* If osd_finalize_request() was called but the request was not
* executed through the block layer, then we must release BIOs.
* TODO: Keep error code in or->async_error. Need to audit all
* code paths.
*/
if (unlikely(rq->bio))
blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
else
blk_put_request(rq);
}
}

void osd_end_request(struct osd_request *or)
{
struct request *rq = or->request;
/* IMPORTANT: make sure this agrees with osd_execute_request_async */
bool is_async = (or->request->end_io_data == or);

_osd_free_seg(or, &or->set_attr);
_osd_free_seg(or, &or->enc_get_attr);
@@ -373,12 +380,11 @@ void osd_end_request(struct osd_request *or)

if (rq) {
if (rq->next_rq) {
_abort_unexecuted_bios(rq->next_rq);
blk_put_request(rq->next_rq);
_put_request(rq->next_rq, is_async);
rq->next_rq = NULL;
}

_abort_unexecuted_bios(rq);
blk_put_request(rq);
_put_request(rq, is_async);
}
_osd_request_free(or);
}
|
@@ -345,10 +345,6 @@ static int osd_probe(struct device *dev)
}

dev_set_drvdata(oud->class_member, oud);
error = sysfs_create_link(&scsi_device->sdev_gendev.kobj,
&oud->class_member->kobj, osd_symlink);
if (error)
OSD_ERR("warning: unable to make symlink\n");

OSD_INFO("osd_probe %s\n", disk->disk_name);
return 0;
@@ -377,8 +373,6 @@ static int osd_remove(struct device *dev)
scsi_device);
}

sysfs_remove_link(&oud->od.scsi_device->sdev_gendev.kobj, osd_symlink);

if (oud->class_member)
device_destroy(osd_sysfs_class,
MKDEV(SCSI_OSD_MAJOR, oud->minor));
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -348,6 +348,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <asm/io.h>
#include <asm/irq.h>
@@ -384,11 +385,7 @@
#define MEMORY_MAPPED_IO 1
#endif

#define UNIQUE_FW_NAME
#include "qla1280.h"
#include "ql12160_fw.h" /* ISP RISC codes */
#include "ql1280_fw.h"
#include "ql1040_fw.h"

#ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!"
@@ -541,10 +538,7 @@ __setup("qla1280=", qla1280_setup);
struct qla_boards {
unsigned char name[9]; /* Board ID String */
int numPorts; /* Number of SCSI ports */
unsigned short *fwcode; /* pointer to FW array */
unsigned short *fwlen; /* number of words in array */
unsigned short *fwstart; /* start address for F/W */
unsigned char *fwver; /* Ptr to F/W version array */
char *fwname; /* firmware name */
};

/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
@@ -567,19 +561,13 @@ MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);

static struct qla_boards ql1280_board_tbl[] = {
/* Name , Number of ports, FW details */
{"QLA12160", 2, &fw12160i_code01[0], &fw12160i_length01,
&fw12160i_addr01, &fw12160i_version_str[0]},
{"QLA1040", 1, &risc_code01[0], &risc_code_length01,
&risc_code_addr01, &firmware_version[0]},
{"QLA1080", 1, &fw1280ei_code01[0], &fw1280ei_length01,
&fw1280ei_addr01, &fw1280ei_version_str[0]},
{"QLA1240", 2, &fw1280ei_code01[0], &fw1280ei_length01,
&fw1280ei_addr01, &fw1280ei_version_str[0]},
{"QLA1280", 2, &fw1280ei_code01[0], &fw1280ei_length01,
&fw1280ei_addr01, &fw1280ei_version_str[0]},
{"QLA10160", 1, &fw12160i_code01[0], &fw12160i_length01,
&fw12160i_addr01, &fw12160i_version_str[0]},
{" ", 0}
{"QLA12160", 2, "qlogic/12160.bin"},
{"QLA1040", 1, "qlogic/1040.bin"},
{"QLA1080", 1, "qlogic/1280.bin"},
{"QLA1240", 2, "qlogic/1280.bin"},
{"QLA1280", 2, "qlogic/1280.bin"},
{"QLA10160", 1, "qlogic/12160.bin"},
{" ", 0, " "},
};

static int qla1280_verbose = 1;
@@ -704,7 +692,7 @@ qla1280_info(struct Scsi_Host *host)
sprintf (bp,
"QLogic %s PCI to SCSI Host Adapter\n"
" Firmware version: %2d.%02d.%02d, Driver version %s",
&bdp->name[0], bdp->fwver[0], bdp->fwver[1], bdp->fwver[2],
&bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
QLA1280_VERSION);
return bp;
}
@@ -1648,36 +1636,60 @@ qla1280_chip_diag(struct scsi_qla_host *ha)
static int
qla1280_load_firmware_pio(struct scsi_qla_host *ha)
{
uint16_t risc_address, *risc_code_address, risc_code_size;
const struct firmware *fw;
const __le16 *fw_data;
uint16_t risc_address, risc_code_size;
uint16_t mb[MAILBOX_REGISTER_COUNT], i;
int err;

err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
return err;
}
if ((fw->size % 2) || (fw->size < 6)) {
printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
fw->size, ql1280_board_tbl[ha->devnum].fwname);
err = -EINVAL;
goto out;
}
ha->fwver1 = fw->data[0];
ha->fwver2 = fw->data[1];
ha->fwver3 = fw->data[2];
fw_data = (const __le16 *)&fw->data[0];
ha->fwstart = __le16_to_cpu(fw_data[2]);

/* Load RISC code. */
risc_address = *ql1280_board_tbl[ha->devnum].fwstart;
risc_code_address = ql1280_board_tbl[ha->devnum].fwcode;
risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
risc_address = ha->fwstart;
fw_data = (const __le16 *)&fw->data[4];
risc_code_size = (fw->size - 6) / 2;

for (i = 0; i < risc_code_size; i++) {
mb[0] = MBC_WRITE_RAM_WORD;
mb[1] = risc_address + i;
mb[2] = risc_code_address[i];
mb[2] = __le16_to_cpu(fw_data[i]);

err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
if (err) {
printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
ha->host_no);
return err;
goto out;
}
}

return 0;
out:
release_firmware(fw);
return err;
}

#define DUMP_IT_BACK 0 /* for debug of RISC loading */
static int
qla1280_load_firmware_dma(struct scsi_qla_host *ha)
{
uint16_t risc_address, *risc_code_address, risc_code_size;
const struct firmware *fw;
const __le16 *fw_data;
uint16_t risc_address, risc_code_size;
uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
int err = 0, num, i;
#if DUMP_IT_BACK
@@ -1689,10 +1701,29 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
return -ENOMEM;
#endif

err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
return err;
}
if ((fw->size % 2) || (fw->size < 6)) {
printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
fw->size, ql1280_board_tbl[ha->devnum].fwname);
err = -EINVAL;
goto out;
}
ha->fwver1 = fw->data[0];
ha->fwver2 = fw->data[1];
ha->fwver3 = fw->data[2];
fw_data = (const __le16 *)&fw->data[0];
ha->fwstart = __le16_to_cpu(fw_data[2]);

/* Load RISC code. */
risc_address = *ql1280_board_tbl[ha->devnum].fwstart;
risc_code_address = ql1280_board_tbl[ha->devnum].fwcode;
risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
risc_address = ha->fwstart;
fw_data = (const __le16 *)&fw->data[4];
risc_code_size = (fw->size - 6) / 2;

dprintk(1, "%s: DMA RISC code (%i) words\n",
__func__, risc_code_size);
@@ -1708,10 +1739,9 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)

dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
"%d,%d(0x%x)\n",
risc_code_address, cnt, num, risc_address);
fw_data, cnt, num, risc_address);
for(i = 0; i < cnt; i++)
((__le16 *)ha->request_ring)[i] =
cpu_to_le16(risc_code_address[i]);
((__le16 *)ha->request_ring)[i] = fw_data[i];

mb[0] = MBC_LOAD_RAM;
mb[1] = risc_address;
@@ -1763,7 +1793,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
#endif
risc_address += cnt;
risc_code_size = risc_code_size - cnt;
risc_code_address = risc_code_address + cnt;
fw_data = fw_data + cnt;
num++;
}

@@ -1771,6 +1801,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
#if DUMP_IT_BACK
pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
#endif
release_firmware(fw);
return err;
}

@@ -1786,7 +1817,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
/* Verify checksum of loaded RISC code. */
mb[0] = MBC_VERIFY_CHECKSUM;
/* mb[1] = ql12_risc_code_addr01; */
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
mb[1] = ha->fwstart;
err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
if (err) {
printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
@@ -1796,7 +1827,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
/* Start firmware execution. */
dprintk(1, "%s: start firmware running.\n", __func__);
mb[0] = MBC_EXECUTE_FIRMWARE;
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
mb[1] = ha->fwstart;
err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
if (err) {
printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
@@ -4450,6 +4481,9 @@ module_exit(qla1280_exit);
MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);

/*
|
@@ -1069,6 +1069,12 @@ struct scsi_qla_host {

struct nvram nvram;
int nvram_valid;

/* Firmware Info */
unsigned short fwstart; /* start address for F/W */
unsigned char fwver1; /* F/W version first char */
unsigned char fwver2; /* F/W version second char */
unsigned char fwver3; /* F/W version third char */
};

#endif /* _QLA1280_H */
|
@@ -96,7 +96,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return 0;
|
||||
|
||||
/* Read NVRAM data from cache. */
|
||||
if (IS_NOCACHE_VPD_TYPE(ha))
|
||||
ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2,
|
||||
ha->nvram_size);
|
||||
return memory_read_from_buffer(buf, count, &off, ha->nvram,
|
||||
ha->nvram_size);
|
||||
}
|
||||
@@ -111,7 +113,8 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint16_t cnt;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
|
||||
!ha->isp_ops->write_nvram)
|
||||
return 0;
|
||||
|
||||
/* Checksum NVRAM. */
|
||||
@@ -137,12 +140,21 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
|
||||
*iter = chksum;
|
||||
}
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"HBA not online, failing NVRAM update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* Write NVRAM. */
|
||||
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
|
||||
count);
|
||||
|
||||
/* NVRAM settings take effect immediately. */
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
qla2x00_wait_for_chip_reset(vha);
|
||||
|
||||
return (count);
|
||||
}
|
||||
@@ -330,6 +342,12 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
|
||||
if (ha->optrom_state != QLA_SWRITING)
|
||||
break;
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"HBA not online, failing flash update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Writing flash region -- 0x%x/0x%x.\n",
|
||||
ha->optrom_region_start, ha->optrom_region_size));
|
||||
@@ -364,7 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return 0;
|
||||
|
||||
/* Read NVRAM data from cache. */
|
||||
if (IS_NOCACHE_VPD_TYPE(ha))
|
||||
ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
|
||||
ha->vpd_size);
|
||||
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
|
||||
}
|
||||
|
||||
@@ -376,14 +396,35 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
|
||||
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
|
||||
struct device, kobj)));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint8_t *tmp_data;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
|
||||
!ha->isp_ops->write_nvram)
|
||||
return 0;
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"HBA not online, failing VPD update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* Write NVRAM. */
|
||||
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
|
||||
|
||||
/* Update flash version information for 4Gb & above. */
|
||||
if (!IS_FWI2_CAPABLE(ha))
|
||||
goto done;
|
||||
|
||||
tmp_data = vmalloc(256);
|
||||
if (!tmp_data) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to allocate memory for VPD information update.\n");
|
||||
goto done;
|
||||
}
|
||||
ha->isp_ops->get_flash_version(vha, tmp_data);
|
||||
vfree(tmp_data);
|
||||
done:
|
||||
return count;
|
||||
}
|
||||
|
||||
@@ -458,6 +499,199 @@ static struct bin_attribute sysfs_sfp_attr = {
|
||||
.read = qla2x00_sysfs_read_sfp,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
qla2x00_sysfs_write_reset(struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
|
||||
struct device, kobj)));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int type;
|
||||
|
||||
if (off != 0)
|
||||
return 0;
|
||||
|
||||
type = simple_strtol(buf, NULL, 10);
|
||||
switch (type) {
|
||||
case 0x2025c:
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"Issuing ISP reset on (%ld).\n", vha->host_no);
|
||||
|
||||
scsi_block_requests(vha->host);
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
qla2x00_wait_for_chip_reset(vha);
|
||||
scsi_unblock_requests(vha->host);
|
||||
break;
|
||||
case 0x2025d:
|
||||
if (!IS_QLA81XX(ha))
|
||||
break;
|
||||
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"Issuing MPI reset on (%ld).\n", vha->host_no);
|
||||
|
||||
/* Make sure FC side is not in reset */
|
||||
qla2x00_wait_for_hba_online(vha);
|
||||
|
||||
/* Issue MPI reset */
|
||||
scsi_block_requests(vha->host);
|
||||
if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"MPI reset failed on (%ld).\n", vha->host_no);
|
||||
scsi_unblock_requests(vha->host);
|
||||
break;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct bin_attribute sysfs_reset_attr = {
|
||||
.attr = {
|
||||
.name = "reset",
|
||||
.mode = S_IWUSR,
|
||||
},
|
||||
.size = 0,
|
||||
.write = qla2x00_sysfs_write_reset,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
qla2x00_sysfs_write_edc(struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
|
||||
struct device, kobj)));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint16_t dev, adr, opt, len;
|
||||
int rval;
|
||||
|
||||
ha->edc_data_len = 0;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
|
||||
return 0;
|
||||
|
||||
if (!ha->edc_data) {
|
||||
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
||||
&ha->edc_data_dma);
|
||||
if (!ha->edc_data) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to allocate memory for EDC write.\n"));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
dev = le16_to_cpup((void *)&buf[0]);
|
||||
adr = le16_to_cpup((void *)&buf[2]);
|
||||
opt = le16_to_cpup((void *)&buf[4]);
|
||||
len = le16_to_cpup((void *)&buf[6]);
|
||||
|
||||
if (!(opt & BIT_0))
|
||||
if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(ha->edc_data, &buf[8], len);
|
||||
|
||||
rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
|
||||
ha->edc_data, len, opt);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
|
||||
rval, dev, adr, opt, len, *buf));
|
||||
return 0;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct bin_attribute sysfs_edc_attr = {
|
||||
.attr = {
|
||||
.name = "edc",
|
||||
.mode = S_IWUSR,
|
||||
},
|
||||
.size = 0,
|
||||
.write = qla2x00_sysfs_write_edc,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
qla2x00_sysfs_write_edc_status(struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
|
||||
struct device, kobj)));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint16_t dev, adr, opt, len;
|
||||
int rval;
|
||||
|
||||
ha->edc_data_len = 0;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
|
||||
return 0;
|
||||
|
||||
if (!ha->edc_data) {
|
||||
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
||||
&ha->edc_data_dma);
|
||||
if (!ha->edc_data) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to allocate memory for EDC status.\n"));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
dev = le16_to_cpup((void *)&buf[0]);
|
||||
adr = le16_to_cpup((void *)&buf[2]);
|
||||
opt = le16_to_cpup((void *)&buf[4]);
|
||||
len = le16_to_cpup((void *)&buf[6]);
|
||||
|
||||
if (!(opt & BIT_0))
|
||||
if (len == 0 || len > DMA_POOL_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
memset(ha->edc_data, 0, len);
|
||||
rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
|
||||
ha->edc_data, len, opt);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
|
||||
rval, dev, adr, opt, len));
|
||||
return 0;
|
||||
}
|
||||
|
||||
ha->edc_data_len = len;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla2x00_sysfs_read_edc_status(struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
|
||||
struct device, kobj)));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
|
||||
return 0;
|
||||
|
||||
if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(buf, ha->edc_data, ha->edc_data_len);
|
||||
|
||||
return ha->edc_data_len;
|
||||
}
|
||||
|
||||
static struct bin_attribute sysfs_edc_status_attr = {
|
||||
.attr = {
|
||||
.name = "edc_status",
|
||||
.mode = S_IRUSR | S_IWUSR,
|
||||
},
|
||||
.size = 0,
|
||||
.write = qla2x00_sysfs_write_edc_status,
|
||||
.read = qla2x00_sysfs_read_edc_status,
|
||||
};
|
||||
|
||||
static struct sysfs_entry {
|
||||
char *name;
|
||||
struct bin_attribute *attr;
|
||||
@@ -469,6 +703,9 @@ static struct sysfs_entry {
|
||||
{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
|
||||
{ "vpd", &sysfs_vpd_attr, 1 },
|
||||
{ "sfp", &sysfs_sfp_attr, 1 },
|
||||
{ "reset", &sysfs_reset_attr, },
|
||||
{ "edc", &sysfs_edc_attr, 2 },
|
||||
{ "edc_status", &sysfs_edc_status_attr, 2 },
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
@@ -482,6 +719,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
|
||||
for (iter = bin_file_entries; iter->name; iter++) {
|
||||
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
|
||||
continue;
|
||||
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
|
||||
continue;
|
||||
|
||||
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
|
||||
iter->attr);
|
||||
@@ -502,6 +741,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
|
||||
for (iter = bin_file_entries; iter->name; iter++) {
|
||||
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
|
||||
continue;
|
||||
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
|
||||
continue;
|
||||
|
||||
sysfs_remove_bin_file(&host->shost_gendev.kobj,
|
||||
iter->attr);
|
||||
@@ -818,9 +1059,33 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
|
||||
if (!IS_QLA81XX(ha))
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x (%x)\n",
|
||||
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
|
||||
ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
|
||||
ha->mpi_version[3], ha->mpi_capabilities);
|
||||
ha->mpi_capabilities);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (!IS_QLA81XX(ha))
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
|
||||
ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla2x00_flash_block_size_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
|
||||
@@ -848,6 +1113,9 @@ static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
|
||||
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
|
||||
NULL);
|
||||
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
|
||||
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
|
||||
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
|
||||
NULL);
|
||||
|
||||
struct device_attribute *qla2x00_host_attrs[] = {
|
||||
&dev_attr_driver_version,
|
||||
@@ -868,6 +1136,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
|
||||
&dev_attr_optrom_fw_version,
|
||||
&dev_attr_total_isp_aborts,
|
||||
&dev_attr_mpi_version,
|
||||
&dev_attr_phy_version,
|
||||
&dev_attr_flash_block_size,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@@ -1012,7 +1282,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
|
||||
if (!fcport)
|
||||
return;
|
||||
|
||||
qla2x00_abort_fcport_cmds(fcport);
|
||||
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
|
||||
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
|
||||
else
|
||||
qla2x00_abort_fcport_cmds(fcport);
|
||||
|
||||
/*
|
||||
* Transport has effectively 'deleted' the rport, clear
|
||||
@@ -1032,16 +1305,18 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
|
||||
if (!fcport)
|
||||
return;
|
||||
|
||||
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
|
||||
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* At this point all fcport's software-states are cleared. Perform any
|
||||
* final cleanup of firmware resources (PCBs and XCBs).
|
||||
*/
|
||||
if (fcport->loop_id != FC_NO_LOOP_ID) {
|
||||
if (fcport->loop_id != FC_NO_LOOP_ID)
|
||||
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
|
||||
fcport->loop_id, fcport->d_id.b.domain,
|
||||
fcport->d_id.b.area, fcport->d_id.b.al_pa);
|
||||
fcport->loop_id = FC_NO_LOOP_ID;
|
||||
}
|
||||
|
||||
qla2x00_abort_fcport_cmds(fcport);
|
||||
}
|
||||
|
@@ -176,8 +176,7 @@
|
||||
/* ISP request and response entry counts (37-65535) */
|
||||
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
|
||||
#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
|
||||
#define REQUEST_ENTRY_CNT_2XXX_EXT_MEM 4096 /* Number of request entries. */
|
||||
#define REQUEST_ENTRY_CNT_24XX 4096 /* Number of request entries. */
|
||||
#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
|
||||
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
|
||||
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
|
||||
|
||||
@@ -201,20 +200,7 @@ typedef struct srb {
|
||||
/*
|
||||
* SRB flag definitions
|
||||
*/
|
||||
#define SRB_TIMEOUT BIT_0 /* Command timed out */
|
||||
#define SRB_DMA_VALID BIT_1 /* Command sent to ISP */
|
||||
#define SRB_WATCHDOG BIT_2 /* Command on watchdog list */
|
||||
#define SRB_ABORT_PENDING BIT_3 /* Command abort sent to device */
|
||||
|
||||
#define SRB_ABORTED BIT_4 /* Command aborted command already */
|
||||
#define SRB_RETRY BIT_5 /* Command needs retrying */
|
||||
#define SRB_GOT_SENSE BIT_6 /* Command has sense data */
|
||||
#define SRB_FAILOVER BIT_7 /* Command in failover state */
|
||||
|
||||
#define SRB_BUSY BIT_8 /* Command is in busy retry state */
|
||||
#define SRB_FO_CANCEL BIT_9 /* Command don't need to do failover */
|
||||
#define SRB_IOCTL BIT_10 /* IOCTL command. */
|
||||
#define SRB_TAPE BIT_11 /* FCP2 (Tape) command. */
|
||||
#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
|
||||
|
||||
/*
|
||||
* ISP I/O Register Set structure definitions.
|
||||
@@ -372,10 +358,10 @@ struct device_reg_2xxx {
|
||||
};
|
||||
|
||||
struct device_reg_25xxmq {
|
||||
volatile uint32_t req_q_in;
|
||||
volatile uint32_t req_q_out;
|
||||
volatile uint32_t rsp_q_in;
|
||||
volatile uint32_t rsp_q_out;
|
||||
uint32_t req_q_in;
|
||||
uint32_t req_q_out;
|
||||
uint32_t rsp_q_in;
|
||||
uint32_t rsp_q_out;
|
||||
};
|
||||
|
||||
typedef union {
|
||||
@@ -620,6 +606,7 @@ typedef struct {
|
||||
#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */
|
||||
#define MBC_TRACE_CONTROL 0x27 /* Trace control command. */
|
||||
#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */
|
||||
#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */
|
||||
#define MBC_READ_SFP 0x31 /* Read SFP Data. */
|
||||
#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */
|
||||
#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */
|
||||
@@ -1570,39 +1557,13 @@ typedef struct fc_port {
|
||||
#define FCS_DEVICE_DEAD 2
|
||||
#define FCS_DEVICE_LOST 3
|
||||
#define FCS_ONLINE 4
|
||||
#define FCS_NOT_SUPPORTED 5
|
||||
#define FCS_FAILOVER 6
|
||||
#define FCS_FAILOVER_FAILED 7
|
||||
|
||||
/*
|
||||
* FC port flags.
|
||||
*/
|
||||
#define FCF_FABRIC_DEVICE BIT_0
|
||||
#define FCF_LOGIN_NEEDED BIT_1
|
||||
#define FCF_FO_MASKED BIT_2
|
||||
#define FCF_FAILOVER_NEEDED BIT_3
|
||||
#define FCF_RESET_NEEDED BIT_4
|
||||
#define FCF_PERSISTENT_BOUND BIT_5
|
||||
#define FCF_TAPE_PRESENT BIT_6
|
||||
#define FCF_FARP_DONE BIT_7
|
||||
#define FCF_FARP_FAILED BIT_8
|
||||
#define FCF_FARP_REPLY_NEEDED BIT_9
|
||||
#define FCF_AUTH_REQ BIT_10
|
||||
#define FCF_SEND_AUTH_REQ BIT_11
|
||||
#define FCF_RECEIVE_AUTH_REQ BIT_12
|
||||
#define FCF_AUTH_SUCCESS BIT_13
|
||||
#define FCF_RLC_SUPPORT BIT_14
|
||||
#define FCF_CONFIG BIT_15 /* Needed? */
|
||||
#define FCF_RESCAN_NEEDED BIT_16
|
||||
#define FCF_XP_DEVICE BIT_17
|
||||
#define FCF_MSA_DEVICE BIT_18
|
||||
#define FCF_EVA_DEVICE BIT_19
|
||||
#define FCF_MSA_PORT_ACTIVE BIT_20
|
||||
#define FCF_FAILBACK_DISABLE BIT_21
|
||||
#define FCF_FAILOVER_DISABLE BIT_22
|
||||
#define FCF_DSXXX_DEVICE BIT_23
|
||||
#define FCF_AA_EVA_DEVICE BIT_24
|
||||
#define FCF_AA_MSA_DEVICE BIT_25
|
||||
#define FCF_TAPE_PRESENT BIT_2
|
||||
|
||||
/* No loop ID flag. */
|
||||
#define FC_NO_LOOP_ID 0x1000
|
||||
@@ -2102,9 +2063,6 @@ struct isp_operations {
|
||||
|
||||
int (*get_flash_version) (struct scsi_qla_host *, void *);
|
||||
int (*start_scsi) (srb_t *);
|
||||
void (*wrt_req_reg) (struct qla_hw_data *, uint16_t, uint16_t);
|
||||
void (*wrt_rsp_reg) (struct qla_hw_data *, uint16_t, uint16_t);
|
||||
uint16_t (*rd_req_reg) (struct qla_hw_data *, uint16_t);
|
||||
};
|
||||
|
||||
/* MSI-X Support *************************************************************/
|
||||
@@ -2200,6 +2158,8 @@ struct rsp_que {
|
||||
dma_addr_t dma;
|
||||
response_t *ring;
|
||||
response_t *ring_ptr;
|
||||
uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */
|
||||
uint32_t __iomem *rsp_q_out;
|
||||
uint16_t ring_index;
|
||||
uint16_t out_ptr;
|
||||
uint16_t length;
|
||||
@@ -2217,6 +2177,8 @@ struct req_que {
|
||||
dma_addr_t dma;
|
||||
request_t *ring;
|
||||
request_t *ring_ptr;
|
||||
uint32_t __iomem *req_q_in; /* FWI2-capable only. */
|
||||
uint32_t __iomem *req_q_out;
|
||||
uint16_t ring_index;
|
||||
uint16_t in_ptr;
|
||||
uint16_t cnt;
|
||||
@@ -2256,10 +2218,10 @@ struct qla_hw_data {
|
||||
uint32_t msix_enabled :1;
|
||||
uint32_t disable_serdes :1;
|
||||
uint32_t gpsc_supported :1;
|
||||
uint32_t vsan_enabled :1;
|
||||
uint32_t npiv_supported :1;
|
||||
uint32_t fce_enabled :1;
|
||||
uint32_t hw_event_marker_found:1;
|
||||
uint32_t fac_supported :1;
|
||||
uint32_t chip_reset_done :1;
|
||||
} flags;
|
||||
|
||||
/* This spinlock is used to protect "io transactions", you must
|
||||
@@ -2277,7 +2239,7 @@ struct qla_hw_data {
|
||||
|
||||
#define MIN_IOBASE_LEN 0x100
|
||||
/* Multi queue data structs */
|
||||
device_reg_t *mqiobase;
|
||||
device_reg_t __iomem *mqiobase;
|
||||
uint16_t msix_count;
|
||||
uint8_t mqenable;
|
||||
struct req_que **req_q_map;
|
||||
@@ -2300,7 +2262,6 @@ struct qla_hw_data {
|
||||
uint16_t max_loop_id;
|
||||
|
||||
uint16_t fb_rev;
|
||||
uint16_t max_public_loop_ids;
|
||||
uint16_t min_external_loopid; /* First external loop Id */
|
||||
|
||||
#define PORT_SPEED_UNKNOWN 0xFFFF
|
||||
@@ -2381,6 +2342,8 @@ struct qla_hw_data {
|
||||
IS_QLA25XX(ha) || IS_QLA81XX(ha))
|
||||
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
|
||||
(ha)->flags.msix_enabled)
|
||||
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
|
||||
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
|
||||
|
||||
#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
|
||||
#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
|
||||
@@ -2425,6 +2388,10 @@ struct qla_hw_data {
|
||||
void *sfp_data;
|
||||
dma_addr_t sfp_data_dma;
|
||||
|
||||
uint8_t *edc_data;
|
||||
dma_addr_t edc_data_dma;
|
||||
uint16_t edc_data_len;
|
||||
|
||||
struct task_struct *dpc_thread;
|
||||
uint8_t dpc_active; /* DPC routine is active */
|
||||
|
||||
@@ -2439,6 +2406,8 @@ struct qla_hw_data {
|
||||
dma_addr_t init_cb_dma;
|
||||
init_cb_t *init_cb;
|
||||
int init_cb_size;
|
||||
dma_addr_t ex_init_cb_dma;
|
||||
struct ex_init_cb_81xx *ex_init_cb;
|
||||
|
||||
/* These are used by mailbox operations. */
|
||||
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
|
||||
@@ -2453,15 +2422,6 @@ struct qla_hw_data {
|
||||
struct completion mbx_cmd_comp; /* Serialize mbx access */
|
||||
struct completion mbx_intr_comp; /* Used for completion notification */
|
||||
|
||||
uint32_t mbx_flags;
|
||||
#define MBX_IN_PROGRESS BIT_0
|
||||
#define MBX_BUSY BIT_1 /* Got the Access */
|
||||
#define MBX_SLEEPING_ON_SEM BIT_2
|
||||
#define MBX_POLLING_FOR_COMP BIT_3
|
||||
#define MBX_COMPLETED BIT_4
|
||||
#define MBX_TIMEDOUT BIT_5
|
||||
#define MBX_ACCESS_TIMEDOUT BIT_6
|
||||
|
||||
/* Basic firmware related information. */
|
||||
uint16_t fw_major_version;
|
||||
uint16_t fw_minor_version;
|
||||
@@ -2473,13 +2433,15 @@ struct qla_hw_data {
|
||||
#define RISC_START_ADDRESS_2100 0x1000
|
||||
#define RISC_START_ADDRESS_2300 0x800
|
||||
#define RISC_START_ADDRESS_2400 0x100000
|
||||
uint16_t fw_xcb_count;
|
||||
|
||||
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
|
||||
uint8_t fw_seriallink_options[4];
|
||||
uint16_t fw_seriallink_options24[4];
|
||||
|
||||
uint8_t mpi_version[4];
|
||||
uint8_t mpi_version[3];
|
||||
uint32_t mpi_capabilities;
|
||||
uint8_t phy_version[3];
|
||||
|
||||
/* Firmware dump information. */
|
||||
struct qla2xxx_fw_dump *fw_dump;
|
||||
@@ -2545,6 +2507,8 @@ struct qla_hw_data {
|
||||
uint32_t flt_region_boot;
|
||||
uint32_t flt_region_fw;
|
||||
uint32_t flt_region_vpd_nvram;
|
||||
uint32_t flt_region_vpd;
|
||||
uint32_t flt_region_nvram;
|
||||
uint32_t flt_region_npiv_conf;
|
||||
|
||||
/* Needed for BEACON */
|
||||
@@ -2613,36 +2577,19 @@ typedef struct scsi_qla_host {
|
||||
#define LOOP_RESYNC_ACTIVE 5
|
||||
#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
|
||||
#define RSCN_UPDATE 7 /* Perform an RSCN update. */
|
||||
#define MAILBOX_RETRY 8
|
||||
#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
|
||||
#define FAILOVER_EVENT_NEEDED 10
|
||||
#define FAILOVER_EVENT 11
|
||||
#define FAILOVER_NEEDED 12
|
||||
#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
|
||||
#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
|
||||
#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
|
||||
#define ABORT_QUEUES_NEEDED 16
|
||||
#define RELOGIN_NEEDED 17
|
||||
#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
|
||||
#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
|
||||
#define ISP_ABORT_RETRY 20 /* ISP aborted. */
|
||||
#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
|
||||
#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
|
||||
#define IOCTL_ERROR_RECOVERY 23
|
||||
#define LOOP_RESET_NEEDED 24
|
||||
#define BEACON_BLINK_NEEDED 25
|
||||
#define REGISTER_FDMI_NEEDED 26
|
||||
#define FCPORT_UPDATE_NEEDED 27
|
||||
#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
|
||||
#define UNLOADING 29
|
||||
#define NPIV_CONFIG_NEEDED 30
|
||||
#define RELOGIN_NEEDED 8
|
||||
#define REGISTER_FC4_NEEDED 9 /* SNS FC4 registration required. */
|
||||
#define ISP_ABORT_RETRY 10 /* ISP aborted. */
|
||||
#define BEACON_BLINK_NEEDED 11
|
||||
#define REGISTER_FDMI_NEEDED 12
|
||||
#define FCPORT_UPDATE_NEEDED 13
|
||||
#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
|
||||
#define UNLOADING 15
|
||||
#define NPIV_CONFIG_NEEDED 16
|
||||
|
||||
uint32_t device_flags;
|
||||
#define DFLG_LOCAL_DEVICES BIT_0
|
||||
#define DFLG_RETRY_LOCAL_DEVICES BIT_1
|
||||
#define DFLG_FABRIC_DEVICES BIT_2
|
||||
#define SWITCH_FOUND BIT_3
|
||||
#define DFLG_NO_CABLE BIT_4
|
||||
#define SWITCH_FOUND BIT_0
|
||||
#define DFLG_NO_CABLE BIT_1
|
||||
|
||||
srb_t *status_srb; /* Status continuation entry. */
|
||||
|
||||
@@ -2755,10 +2702,5 @@ typedef struct scsi_qla_host {
|
||||
#include "qla_inline.h"
|
||||
|
||||
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
|
||||
#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
|
||||
#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
|
||||
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
|
||||
#define CMD_ACTUAL_SNSLEN(Cmnd) ((Cmnd)->SCp.Message)
|
||||
#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
|
||||
|
||||
#endif
|
||||
|
@@ -71,7 +71,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
|
||||
|
||||
mutex_unlock(&ha->fce_mutex);
|
||||
out:
|
||||
return single_open(file, qla2x00_dfs_fce_show, ha);
|
||||
return single_open(file, qla2x00_dfs_fce_show, vha);
|
||||
}
|
||||
|
||||
static int
|
||||
@@ -145,7 +145,7 @@ create_dir:
|
||||
atomic_inc(&qla2x00_dfs_root_count);
|
||||
|
||||
create_nodes:
|
||||
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, ha,
|
||||
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
|
||||
&dfs_fce_ops);
|
||||
if (!ha->dfs_fce) {
|
||||
qla_printk(KERN_NOTICE, ha,
|
||||
|
@@ -1403,6 +1403,21 @@ struct access_chip_rsp_84xx {
|
||||
#define MBA_IDC_TIME_EXT 0x8102
|
||||
|
||||
#define MBC_IDC_ACK 0x101
|
||||
#define MBC_RESTART_MPI_FW 0x3d
|
||||
#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
|
||||
|
||||
/* Flash access control option field bit definitions */
|
||||
#define FAC_OPT_FORCE_SEMAPHORE BIT_15
|
||||
#define FAC_OPT_REQUESTOR_ID BIT_14
|
||||
#define FAC_OPT_CMD_SUBCODE 0xff
|
||||
|
||||
/* Flash access control command subcodes */
|
||||
#define FAC_OPT_CMD_WRITE_PROTECT 0x00
|
||||
#define FAC_OPT_CMD_WRITE_ENABLE 0x01
|
||||
#define FAC_OPT_CMD_ERASE_SECTOR 0x02
|
||||
#define FAC_OPT_CMD_LOCK_SEMAPHORE 0x03
|
||||
#define FAC_OPT_CMD_UNLOCK_SEMAPHORE 0x04
|
||||
#define FAC_OPT_CMD_GET_SECTOR_SIZE 0x05
|
||||
|
||||
struct nvram_81xx {
|
||||
/* NVRAM header. */
|
||||
@@ -1440,7 +1455,17 @@ struct nvram_81xx {
|
||||
uint16_t reserved_6[24];
|
||||
|
||||
/* Offset 128. */
|
||||
uint16_t reserved_7[64];
|
||||
uint16_t ex_version;
|
||||
uint8_t prio_fcf_matching_flags;
|
||||
uint8_t reserved_6_1[3];
|
||||
uint16_t pri_fcf_vlan_id;
|
||||
uint8_t pri_fcf_fabric_name[8];
|
||||
uint16_t reserved_6_2[7];
|
||||
uint8_t spma_mac_addr[6];
|
||||
uint16_t reserved_6_3[14];
|
||||
|
||||
/* Offset 192. */
|
||||
uint16_t reserved_7[32];
|
||||
|
||||
/*
|
||||
* BIT 0 = Enable spinup delay
|
||||
@@ -1664,6 +1689,17 @@ struct mid_init_cb_81xx {
|
||||
struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
|
||||
};
|
||||
|
||||
struct ex_init_cb_81xx {
|
||||
uint16_t ex_version;
|
||||
uint8_t prio_fcf_matching_flags;
|
||||
uint8_t reserved_1[3];
|
||||
uint16_t pri_fcf_vlan_id;
|
||||
uint8_t pri_fcf_fabric_name[8];
|
||||
uint16_t reserved_2[7];
|
||||
uint8_t spma_mac_addr[6];
|
||||
uint16_t reserved_3[14];
|
||||
};
|
||||
|
||||
#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
|
||||
#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
|
||||
|
||||
@@ -1672,6 +1708,10 @@ struct mid_init_cb_81xx {
|
||||
#define FA_RISC_CODE_ADDR_81 0xA0000
|
||||
#define FA_FW_AREA_ADDR_81 0xC0000
|
||||
#define FA_VPD_NVRAM_ADDR_81 0xD0000
|
||||
#define FA_VPD0_ADDR_81 0xD0000
|
||||
#define FA_VPD1_ADDR_81 0xD0400
|
||||
#define FA_NVRAM0_ADDR_81 0xD0080
|
||||
#define FA_NVRAM1_ADDR_81 0xD0480
|
||||
#define FA_FEATURE_ADDR_81 0xD4000
|
||||
#define FA_FLASH_DESCR_ADDR_81 0xD8000
|
||||
#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
|
||||
|
@@ -73,6 +73,7 @@ extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
|
||||
extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
|
||||
fc_host_event_code, u32);
|
||||
extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
|
||||
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
|
||||
|
||||
extern void qla2x00_abort_fcport_cmds(fc_port_t *);
|
||||
extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
|
||||
@@ -82,7 +83,7 @@ extern void qla2x00_relogin(struct scsi_qla_host *);
|
||||
/*
|
||||
* Global Functions in qla_mid.c source file.
|
||||
*/
|
||||
extern struct scsi_host_template qla24xx_driver_template;
|
||||
extern struct scsi_host_template qla2xxx_driver_template;
|
||||
extern struct scsi_transport_template *qla2xxx_transport_vport_template;
|
||||
extern void qla2x00_timer(scsi_qla_host_t *);
|
||||
extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
|
||||
@@ -110,6 +111,7 @@ extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
|
||||
extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
|
||||
|
||||
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
|
||||
extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
|
||||
|
||||
extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
|
||||
extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
|
||||
@@ -144,8 +146,8 @@ extern int
|
||||
qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
|
||||
|
||||
extern void
|
||||
qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *,
|
||||
uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *);
|
||||
qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
|
||||
uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
|
||||
|
||||
extern int
|
||||
qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
|
||||
@@ -262,6 +264,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
|
||||
extern int
|
||||
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
|
||||
|
||||
extern int
|
||||
qla2x00_read_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t,
|
||||
uint8_t *, uint16_t, uint16_t);
|
||||
|
||||
extern int
|
||||
qla2x00_write_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t,
|
||||
uint8_t *, uint16_t, uint16_t);
|
||||
|
||||
extern int
|
||||
qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
|
||||
|
||||
@@ -269,6 +279,15 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
|
||||
|
||||
extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *);
|
||||
|
||||
extern int
|
||||
qla81xx_fac_get_sector_size(scsi_qla_host_t *, uint32_t *);
|
||||
|
||||
extern int
|
||||
qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
|
||||
|
||||
extern int
|
||||
qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
|
||||
|
||||
/*
|
||||
* Global Function Prototypes in qla_isr.c source file.
|
||||
*/
|
||||
|
@@ -20,7 +20,6 @@
|
||||
* QLogic ISP2x00 Hardware Support Function Prototypes.
|
||||
*/
|
||||
static int qla2x00_isp_firmware(scsi_qla_host_t *);
|
||||
static void qla2x00_resize_request_q(scsi_qla_host_t *);
|
||||
static int qla2x00_setup_chip(scsi_qla_host_t *);
|
||||
static int qla2x00_init_rings(scsi_qla_host_t *);
|
||||
static int qla2x00_fw_ready(scsi_qla_host_t *);
|
||||
@@ -61,8 +60,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
|
||||
int rval;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = ha->req_q_map[0];
|
||||
|
||||
/* Clear adapter flags. */
|
||||
vha->flags.online = 0;
|
||||
ha->flags.chip_reset_done = 0;
|
||||
vha->flags.reset_active = 0;
|
||||
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
|
||||
atomic_set(&vha->loop_state, LOOP_DOWN);
|
||||
@@ -70,7 +71,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
|
||||
vha->dpc_flags = 0;
|
||||
vha->flags.management_server_logged_in = 0;
|
||||
vha->marker_needed = 0;
|
||||
ha->mbx_flags = 0;
|
||||
ha->isp_abort_cnt = 0;
|
||||
ha->beacon_blink_led = 0;
|
||||
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
|
||||
@@ -131,6 +131,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
|
||||
}
|
||||
}
|
||||
rval = qla2x00_init_rings(vha);
|
||||
ha->flags.chip_reset_done = 1;
|
||||
|
||||
return (rval);
|
||||
}
|
||||
@@ -512,7 +513,6 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
|
||||
static inline void
|
||||
qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
{
|
||||
int hw_evt = 0;
|
||||
unsigned long flags = 0;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
@@ -542,8 +542,6 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
d2 = (uint32_t) RD_REG_WORD(®->mailbox0);
|
||||
barrier();
|
||||
}
|
||||
if (cnt == 0)
|
||||
hw_evt = 1;
|
||||
|
||||
/* Wait for soft-reset to complete. */
|
||||
d2 = RD_REG_DWORD(®->ctrl_status);
|
||||
@@ -816,7 +814,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
|
||||
qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
|
||||
FCE_SIZE / 1024);
|
||||
|
||||
fce_size = sizeof(struct qla2xxx_fce_chain) + EFT_SIZE;
|
||||
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
|
||||
ha->flags.fce_enabled = 1;
|
||||
ha->fce_dma = tc_dma;
|
||||
ha->fce = tc;
|
||||
@@ -893,62 +891,6 @@ cont_alloc:
|
||||
htonl(offsetof(struct qla2xxx_fw_dump, isp));
|
||||
}
|
||||
|
||||
/**
|
||||
* qla2x00_resize_request_q() - Resize request queue given available ISP memory.
|
||||
* @ha: HA context
|
||||
*
|
||||
* Returns 0 on success.
|
||||
*/
|
||||
static void
|
||||
qla2x00_resize_request_q(scsi_qla_host_t *vha)
|
||||
{
|
||||
int rval;
|
||||
uint16_t fw_iocb_cnt = 0;
|
||||
uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
|
||||
dma_addr_t request_dma;
|
||||
request_t *request_ring;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = ha->req_q_map[0];
|
||||
|
||||
/* Valid only on recent ISPs. */
|
||||
if (IS_QLA2100(ha) || IS_QLA2200(ha))
|
||||
return;
|
||||
|
||||
/* Retrieve IOCB counts available to the firmware. */
|
||||
rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
|
||||
&ha->max_npiv_vports);
|
||||
if (rval)
|
||||
return;
|
||||
/* No point in continuing if current settings are sufficient. */
|
||||
if (fw_iocb_cnt < 1024)
|
||||
return;
|
||||
if (req->length >= request_q_length)
|
||||
return;
|
||||
|
||||
/* Attempt to claim larger area for request queue. */
|
||||
request_ring = dma_alloc_coherent(&ha->pdev->dev,
|
||||
(request_q_length + 1) * sizeof(request_t), &request_dma,
|
||||
GFP_KERNEL);
|
||||
if (request_ring == NULL)
|
||||
return;
|
||||
|
||||
/* Resize successful, report extensions. */
|
||||
qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
|
||||
(ha->fw_memory_size + 1) / 1024);
|
||||
qla_printk(KERN_INFO, ha, "Resizing request queue depth "
|
||||
"(%d -> %d)...\n", req->length, request_q_length);
|
||||
|
||||
/* Clear old allocations. */
|
||||
dma_free_coherent(&ha->pdev->dev,
|
||||
(req->length + 1) * sizeof(request_t), req->ring,
|
||||
req->dma);
|
||||
|
||||
/* Begin using larger queue. */
|
||||
req->length = request_q_length;
|
||||
req->ring = request_ring;
|
||||
req->dma = request_dma;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla2x00_setup_chip() - Load and start RISC firmware.
|
||||
* @ha: HA context
|
||||
@@ -963,6 +905,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
unsigned long flags;
|
||||
uint16_t fw_major_version;
|
||||
|
||||
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
|
||||
/* Disable SRAM, Instruction RAM and GP RAM parity. */
|
||||
@@ -986,13 +929,15 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
|
||||
|
||||
rval = qla2x00_execute_fw(vha, srisc_address);
|
||||
/* Retrieve firmware information. */
|
||||
if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
|
||||
if (rval == QLA_SUCCESS) {
|
||||
fw_major_version = ha->fw_major_version;
|
||||
qla2x00_get_fw_version(vha,
|
||||
&ha->fw_major_version,
|
||||
&ha->fw_minor_version,
|
||||
&ha->fw_subminor_version,
|
||||
&ha->fw_attributes, &ha->fw_memory_size,
|
||||
ha->mpi_version, &ha->mpi_capabilities);
|
||||
ha->mpi_version, &ha->mpi_capabilities,
|
||||
ha->phy_version);
|
||||
ha->flags.npiv_supported = 0;
|
||||
if (IS_QLA2XXX_MIDTYPE(ha) &&
|
||||
(ha->fw_attributes & BIT_2)) {
|
||||
@@ -1003,9 +948,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
|
||||
ha->max_npiv_vports =
|
||||
MIN_MULTI_ID_FABRIC - 1;
|
||||
}
|
||||
qla2x00_resize_request_q(vha);
|
||||
qla2x00_get_resource_cnts(vha, NULL,
|
||||
&ha->fw_xcb_count, NULL, NULL,
|
||||
&ha->max_npiv_vports);
|
||||
|
||||
if (ql2xallocfwdump)
|
||||
if (!fw_major_version && ql2xallocfwdump)
|
||||
qla2x00_alloc_fw_dump(vha);
|
||||
}
|
||||
} else {
|
||||
@@ -1028,6 +975,21 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
}
|
||||
|
||||
if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
|
||||
uint32_t size;
|
||||
|
||||
rval = qla81xx_fac_get_sector_size(vha, &size);
|
||||
if (rval == QLA_SUCCESS) {
|
||||
ha->flags.fac_supported = 1;
|
||||
ha->fdt_block_size = size << 2;
|
||||
} else {
|
||||
qla_printk(KERN_ERR, ha,
|
||||
"Unsupported FAC firmware (%d.%02d.%02d).\n",
|
||||
ha->fw_major_version, ha->fw_minor_version,
|
||||
ha->fw_subminor_version);
|
||||
}
|
||||
}
|
||||
|
||||
if (rval) {
|
||||
DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
|
||||
vha->host_no));
|
||||
@@ -1314,8 +1276,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
|
||||
mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
|
||||
}
|
||||
|
||||
|
||||
mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
|
||||
if (IS_FWI2_CAPABLE(ha)) {
|
||||
mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
|
||||
mid_init_cb->init_cb.execution_throttle =
|
||||
cpu_to_le16(ha->fw_xcb_count);
|
||||
}
|
||||
|
||||
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
|
||||
if (rval) {
|
||||
@@ -1989,7 +1954,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
|
||||
fcport->port_type = FCT_UNKNOWN;
|
||||
fcport->loop_id = FC_NO_LOOP_ID;
|
||||
atomic_set(&fcport->state, FCS_UNCONFIGURED);
|
||||
fcport->flags = FCF_RLC_SUPPORT;
|
||||
fcport->supported_classes = FC_COS_UNSPECIFIED;
|
||||
|
||||
return fcport;
|
||||
@@ -2171,7 +2135,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
|
||||
vha->host_no, fcport->loop_id));
|
||||
|
||||
atomic_set(&fcport->state, FCS_DEVICE_LOST);
|
||||
fcport->flags &= ~FCF_FARP_DONE;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2228,8 +2191,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
|
||||
WWN_SIZE))
|
||||
continue;
|
||||
|
||||
fcport->flags &= ~(FCF_FABRIC_DEVICE |
|
||||
FCF_PERSISTENT_BOUND);
|
||||
fcport->flags &= ~FCF_FABRIC_DEVICE;
|
||||
fcport->loop_id = new_fcport->loop_id;
|
||||
fcport->port_type = new_fcport->port_type;
|
||||
fcport->d_id.b24 = new_fcport->d_id.b24;
|
||||
@@ -2242,7 +2204,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
|
||||
|
||||
if (!found) {
|
||||
/* New device, add to fcports list. */
|
||||
new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
|
||||
if (vha->vp_idx) {
|
||||
new_fcport->vha = vha;
|
||||
new_fcport->vp_idx = vha->vp_idx;
|
||||
@@ -2275,11 +2236,6 @@ cleanup_allocation:
|
||||
"rval=%x\n", vha->host_no, rval));
|
||||
}
|
||||
|
||||
if (found_devs) {
|
||||
vha->device_flags |= DFLG_LOCAL_DEVICES;
|
||||
vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
|
||||
}
|
||||
|
||||
return (rval);
|
||||
}
|
||||
|
||||
@@ -2765,7 +2721,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
|
||||
fcport->loop_id = FC_NO_LOOP_ID;
|
||||
fcport->flags |= (FCF_FABRIC_DEVICE |
|
||||
FCF_LOGIN_NEEDED);
|
||||
fcport->flags &= ~FCF_PERSISTENT_BOUND;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -2808,9 +2763,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
|
||||
kfree(swl);
|
||||
kfree(new_fcport);
|
||||
|
||||
if (!list_empty(new_fcports))
|
||||
vha->device_flags |= DFLG_FABRIC_DEVICES;
|
||||
|
||||
return (rval);
|
||||
}
|
||||
|
||||
@@ -2993,7 +2945,6 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
|
||||
0, 0);
|
||||
}
|
||||
}
|
||||
fcport->flags &= ~FCF_FARP_DONE;
|
||||
}
|
||||
}
|
||||
return (rval);
|
||||
@@ -3302,6 +3253,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
|
||||
|
||||
if (vha->flags.online) {
|
||||
vha->flags.online = 0;
|
||||
ha->flags.chip_reset_done = 0;
|
||||
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
ha->qla_stats.total_isp_aborts++;
|
||||
|
||||
@@ -3451,6 +3403,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
|
||||
|
||||
if (!status && !(status = qla2x00_init_rings(vha))) {
|
||||
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
|
||||
ha->flags.chip_reset_done = 1;
|
||||
/* Initialize the queues in use */
|
||||
qla25xx_init_queues(ha);
|
||||
|
||||
@@ -4338,23 +4291,17 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
|
||||
/* Determine NVRAM starting address. */
|
||||
ha->nvram_size = sizeof(struct nvram_81xx);
|
||||
ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
|
||||
ha->vpd_size = FA_NVRAM_VPD_SIZE;
|
||||
ha->vpd_base = FA_NVRAM_VPD0_ADDR;
|
||||
if (PCI_FUNC(ha->pdev->devfn) & 1) {
|
||||
ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
|
||||
ha->vpd_base = FA_NVRAM_VPD1_ADDR;
|
||||
}
|
||||
|
||||
/* Get VPD data into cache */
|
||||
ha->vpd = ha->nvram + VPD_OFFSET;
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
|
||||
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
|
||||
ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
|
||||
ha->vpd_size);
|
||||
|
||||
/* Get NVRAM data into cache and calculate checksum. */
|
||||
dptr = (uint32_t *)nv;
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
|
||||
ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
|
||||
ha->nvram_size);
|
||||
dptr = (uint32_t *)nv;
|
||||
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
|
||||
chksum += le32_to_cpu(*dptr++);
|
||||
|
||||
@@ -4452,6 +4399,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
|
||||
}
|
||||
|
||||
/* Use extended-initialization control block. */
|
||||
memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
|
||||
|
||||
/*
|
||||
* Setup driver NVRAM options.
|
||||
*/
|
||||
|
@@ -776,7 +776,7 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
|
||||
req_cnt = qla24xx_calc_iocbs(tot_dsds);
|
||||
if (req->cnt < (req_cnt + 2)) {
|
||||
cnt = ha->isp_ops->rd_req_reg(ha, req->id);
|
||||
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
|
||||
if (req->ring_index < cnt)
|
||||
req->cnt = cnt - req->ring_index;
|
||||
@@ -836,7 +836,8 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
sp->flags |= SRB_DMA_VALID;
|
||||
|
||||
/* Set chip new ring index. */
|
||||
ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
|
||||
WRT_REG_DWORD(req->req_q_in, req->ring_index);
|
||||
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
|
||||
|
||||
/* Manage unprocessed RIO/ZIO commands in response queue. */
|
||||
if (vha->flags.process_response_queue &&
|
||||
@@ -854,35 +855,3 @@ queuing_error:
|
||||
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
|
||||
{
|
||||
device_reg_t __iomem *reg = (void *) ha->iobase;
|
||||
return RD_REG_DWORD_RELAXED(®->isp24.req_q_out);
|
||||
}
|
||||
|
||||
uint16_t
|
||||
qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
|
||||
{
|
||||
device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
|
||||
return RD_REG_DWORD_RELAXED(®->isp25mq.req_q_out);
|
||||
}
|
||||
|
||||
void
|
||||
qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
|
||||
{
|
||||
device_reg_t __iomem *reg = (void *) ha->iobase;
|
||||
WRT_REG_DWORD(®->isp24.req_q_in, index);
|
||||
RD_REG_DWORD_RELAXED(®->isp24.req_q_in);
|
||||
}
|
||||
|
||||
void
|
||||
qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
|
||||
{
|
||||
device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
|
||||
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
|
||||
WRT_REG_DWORD(®->isp25mq.req_q_in, index);
|
||||
RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
|
||||
}
|
||||
|
||||
|
@@ -852,9 +852,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
|
||||
/* Free outstanding command slot. */
|
||||
req->outstanding_cmds[index] = NULL;
|
||||
|
||||
CMD_COMPL_STATUS(sp->cmd) = 0L;
|
||||
CMD_SCSI_STATUS(sp->cmd) = 0L;
|
||||
|
||||
/* Save ISP completion status */
|
||||
sp->cmd->result = DID_OK << 16;
|
||||
|
||||
@@ -955,7 +952,6 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
|
||||
if (sense_len >= SCSI_SENSE_BUFFERSIZE)
|
||||
sense_len = SCSI_SENSE_BUFFERSIZE;
|
||||
|
||||
CMD_ACTUAL_SNSLEN(cp) = sense_len;
|
||||
sp->request_sense_length = sense_len;
|
||||
sp->request_sense_ptr = cp->sense_buffer;
|
||||
if (sp->request_sense_length > 32)
|
||||
@@ -973,8 +969,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
|
||||
cp->device->channel, cp->device->id, cp->device->lun, cp,
|
||||
cp->serial_number));
|
||||
if (sense_len)
|
||||
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
|
||||
CMD_ACTUAL_SNSLEN(cp)));
|
||||
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1043,9 +1038,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
}
|
||||
|
||||
lscsi_status = scsi_status & STATUS_MASK;
|
||||
CMD_ENTRY_STATUS(cp) = sts->entry_status;
|
||||
CMD_COMPL_STATUS(cp) = comp_status;
|
||||
CMD_SCSI_STATUS(cp) = scsi_status;
|
||||
|
||||
fcport = sp->fcport;
|
||||
|
||||
@@ -1104,7 +1096,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
|
||||
resid = resid_len;
|
||||
scsi_set_resid(cp, resid);
|
||||
CMD_RESID_LEN(cp) = resid;
|
||||
|
||||
if (!lscsi_status &&
|
||||
((unsigned)(scsi_bufflen(cp) - resid) <
|
||||
@@ -1160,7 +1151,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
|
||||
if (scsi_status & SS_RESIDUAL_UNDER) {
|
||||
scsi_set_resid(cp, resid);
|
||||
CMD_RESID_LEN(cp) = resid;
|
||||
} else {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"scsi(%ld:%d:%d) UNDERRUN status detected "
|
||||
@@ -1499,7 +1489,6 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
|
||||
void
|
||||
qla24xx_process_response_queue(struct rsp_que *rsp)
|
||||
{
|
||||
struct qla_hw_data *ha = rsp->hw;
|
||||
struct sts_entry_24xx *pkt;
|
||||
struct scsi_qla_host *vha;
|
||||
|
||||
@@ -1553,7 +1542,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
|
||||
}
|
||||
|
||||
/* Adjust ring index */
|
||||
ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
|
||||
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -2029,7 +2018,7 @@ skip_msix:
|
||||
skip_msi:
|
||||
|
||||
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
|
||||
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
|
||||
IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
|
||||
if (ret) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Failed to reserve interrupt %d already in use.\n",
|
||||
@@ -2117,18 +2106,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
|
||||
msix->rsp = rsp;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
|
||||
{
|
||||
device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
|
||||
WRT_REG_DWORD(®->isp25mq.rsp_q_out, index);
|
||||
}
|
||||
|
||||
void
|
||||
qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
|
||||
{
|
||||
device_reg_t __iomem *reg = (void *) ha->iobase;
|
||||
WRT_REG_DWORD(®->isp24.rsp_q_out, index);
|
||||
}
|
||||
|
||||
|
@@ -45,6 +45,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
if (ha->pdev->error_state > pci_channel_io_frozen)
|
||||
return QLA_FUNCTION_TIMEOUT;
|
||||
|
||||
reg = ha->iobase;
|
||||
io_lock_on = base_vha->flags.init_done;
|
||||
|
||||
@@ -408,7 +411,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
|
||||
void
|
||||
qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
|
||||
uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
|
||||
uint32_t *mpi_caps)
|
||||
uint32_t *mpi_caps, uint8_t *phy)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
@@ -420,7 +423,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
|
||||
mcp->out_mb = MBX_0;
|
||||
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
if (IS_QLA81XX(vha->hw))
|
||||
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
|
||||
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
|
||||
mcp->flags = 0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
@@ -435,11 +438,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
|
||||
else
|
||||
*memory = (mcp->mb[5] << 16) | mcp->mb[4];
|
||||
if (IS_QLA81XX(vha->hw)) {
|
||||
mpi[0] = mcp->mb[10] >> 8;
|
||||
mpi[1] = mcp->mb[10] & 0xff;
|
||||
mpi[2] = mcp->mb[11] >> 8;
|
||||
mpi[3] = mcp->mb[11] & 0xff;
|
||||
mpi[0] = mcp->mb[10] & 0xff;
|
||||
mpi[1] = mcp->mb[11] >> 8;
|
||||
mpi[2] = mcp->mb[11] & 0xff;
|
||||
*mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
|
||||
phy[0] = mcp->mb[8] & 0xff;
|
||||
phy[1] = mcp->mb[9] >> 8;
|
||||
phy[2] = mcp->mb[9] & 0xff;
|
||||
}
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
@@ -1043,14 +1048,22 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
|
||||
else
|
||||
mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
|
||||
|
||||
mcp->mb[1] = 0;
|
||||
mcp->mb[2] = MSW(ha->init_cb_dma);
|
||||
mcp->mb[3] = LSW(ha->init_cb_dma);
|
||||
mcp->mb[4] = 0;
|
||||
mcp->mb[5] = 0;
|
||||
mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
|
||||
mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
|
||||
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
|
||||
mcp->in_mb = MBX_5|MBX_4|MBX_0;
|
||||
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
|
||||
mcp->mb[1] = BIT_0;
|
||||
mcp->mb[10] = MSW(ha->ex_init_cb_dma);
|
||||
mcp->mb[11] = LSW(ha->ex_init_cb_dma);
|
||||
mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
|
||||
mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
|
||||
mcp->mb[14] = sizeof(*ha->ex_init_cb);
|
||||
mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
|
||||
}
|
||||
mcp->in_mb = MBX_0;
|
||||
mcp->buf_size = size;
|
||||
mcp->flags = MBX_DMA_OUT;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
@@ -1187,10 +1200,6 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
|
||||
fcport->d_id.b.al_pa = pd->port_id[2];
|
||||
fcport->d_id.b.rsvd_1 = 0;
|
||||
|
||||
/* Check for device require authentication. */
|
||||
pd->common_features & BIT_5 ? (fcport->flags |= FCF_AUTH_REQ) :
|
||||
(fcport->flags &= ~FCF_AUTH_REQ);
|
||||
|
||||
/* If not target must be initiator or unknown type. */
|
||||
if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
|
||||
fcport->port_type = FCT_INITIATOR;
|
||||
@@ -3218,3 +3227,204 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA81XX(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
|
||||
mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
|
||||
mcp->out_mb = MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_1|MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
|
||||
__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
|
||||
} else {
|
||||
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
|
||||
*sector_size = mcp->mb[1];
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA81XX(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
|
||||
mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
|
||||
FAC_OPT_CMD_WRITE_PROTECT;
|
||||
mcp->out_mb = MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_1|MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
|
||||
__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
|
||||
} else {
|
||||
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA81XX(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
|
||||
mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
|
||||
mcp->mb[2] = LSW(start);
|
||||
mcp->mb[3] = MSW(start);
|
||||
mcp->mb[4] = LSW(finish);
|
||||
mcp->mb[5] = MSW(finish);
|
||||
mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_2|MBX_1|MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
|
||||
"mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0],
|
||||
mcp->mb[1], mcp->mb[2]));
|
||||
} else {
|
||||
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
|
||||
{
|
||||
int rval = 0;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
mcp->mb[0] = MBC_RESTART_MPI_FW;
|
||||
mcp->out_mb = MBX_0;
|
||||
mcp->in_mb = MBX_0|MBX_1;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
|
||||
__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
|
||||
} else {
|
||||
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
|
||||
dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
mcp->mb[0] = MBC_READ_SFP;
|
||||
mcp->mb[1] = dev;
|
||||
mcp->mb[2] = MSW(sfp_dma);
|
||||
mcp->mb[3] = LSW(sfp_dma);
|
||||
mcp->mb[6] = MSW(MSD(sfp_dma));
|
||||
mcp->mb[7] = LSW(MSD(sfp_dma));
|
||||
mcp->mb[8] = len;
|
||||
mcp->mb[9] = adr;
|
||||
mcp->mb[10] = opt;
|
||||
mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (opt & BIT_0)
|
||||
if (sfp)
|
||||
*sfp = mcp->mb[8];
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
|
||||
vha->host_no, rval, mcp->mb[0]));
|
||||
} else {
|
||||
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
|
||||
dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
if (opt & BIT_0)
|
||||
if (sfp)
|
||||
len = *sfp;
|
||||
|
||||
mcp->mb[0] = MBC_WRITE_SFP;
|
||||
mcp->mb[1] = dev;
|
||||
mcp->mb[2] = MSW(sfp_dma);
|
||||
mcp->mb[3] = LSW(sfp_dma);
|
||||
mcp->mb[6] = MSW(MSD(sfp_dma));
|
||||
mcp->mb[7] = LSW(MSD(sfp_dma));
|
||||
mcp->mb[8] = len;
|
||||
mcp->mb[9] = adr;
|
||||
mcp->mb[10] = opt;
|
||||
mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
|
||||
vha->host_no, rval, mcp->mb[0]));
|
||||
} else {
|
||||
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
@@ -359,7 +359,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
|
||||
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
|
||||
struct qla_hw_data *ha = base_vha->hw;
|
||||
scsi_qla_host_t *vha;
|
||||
struct scsi_host_template *sht = &qla24xx_driver_template;
|
||||
struct scsi_host_template *sht = &qla2xxx_driver_template;
|
||||
struct Scsi_Host *host;
|
||||
|
||||
vha = qla2x00_create_host(sht, ha);
|
||||
@@ -584,6 +584,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
struct req_que *req = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
uint16_t que_id = 0;
|
||||
device_reg_t __iomem *reg;
|
||||
|
||||
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
|
||||
if (req == NULL) {
|
||||
@@ -631,6 +632,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
req->ring_index = 0;
|
||||
req->cnt = req->length;
|
||||
req->id = que_id;
|
||||
reg = ISP_QUE_REG(ha, que_id);
|
||||
req->req_q_in = ®->isp25mq.req_q_in;
|
||||
req->req_q_out = ®->isp25mq.req_q_out;
|
||||
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
|
||||
@@ -658,7 +662,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
int ret = 0;
|
||||
struct rsp_que *rsp = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
uint16_t que_id = 0;;
|
||||
uint16_t que_id = 0;
|
||||
device_reg_t __iomem *reg;
|
||||
|
||||
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
|
||||
if (rsp == NULL) {
|
||||
@@ -706,6 +711,9 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
rsp->ring_ptr = rsp->ring;
|
||||
rsp->ring_index = 0;
|
||||
rsp->id = que_id;
|
||||
reg = ISP_QUE_REG(ha, que_id);
|
||||
rsp->rsp_q_in = ®->isp25mq.rsp_q_in;
|
||||
rsp->rsp_q_out = ®->isp25mq.rsp_q_out;
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
|
||||
ret = qla25xx_request_irq(rsp);
|
||||
|
@@ -104,9 +104,7 @@ static int qla2xxx_slave_alloc(struct scsi_device *);
|
||||
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
|
||||
static void qla2xxx_scan_start(struct Scsi_Host *);
|
||||
static void qla2xxx_slave_destroy(struct scsi_device *);
|
||||
static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
|
||||
void (*fn)(struct scsi_cmnd *));
|
||||
static int qla24xx_queuecommand(struct scsi_cmnd *cmd,
|
||||
static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
|
||||
void (*fn)(struct scsi_cmnd *));
|
||||
static int qla2xxx_eh_abort(struct scsi_cmnd *);
|
||||
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
|
||||
@@ -117,42 +115,10 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
|
||||
static int qla2x00_change_queue_depth(struct scsi_device *, int);
|
||||
static int qla2x00_change_queue_type(struct scsi_device *, int);
|
||||
|
||||
static struct scsi_host_template qla2x00_driver_template = {
|
||||
struct scsi_host_template qla2xxx_driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = QLA2XXX_DRIVER_NAME,
|
||||
.queuecommand = qla2x00_queuecommand,
|
||||
|
||||
.eh_abort_handler = qla2xxx_eh_abort,
|
||||
.eh_device_reset_handler = qla2xxx_eh_device_reset,
|
||||
.eh_target_reset_handler = qla2xxx_eh_target_reset,
|
||||
.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
|
||||
.eh_host_reset_handler = qla2xxx_eh_host_reset,
|
||||
|
||||
.slave_configure = qla2xxx_slave_configure,
|
||||
|
||||
.slave_alloc = qla2xxx_slave_alloc,
|
||||
.slave_destroy = qla2xxx_slave_destroy,
|
||||
.scan_finished = qla2xxx_scan_finished,
|
||||
.scan_start = qla2xxx_scan_start,
|
||||
.change_queue_depth = qla2x00_change_queue_depth,
|
||||
.change_queue_type = qla2x00_change_queue_type,
|
||||
.this_id = -1,
|
||||
.cmd_per_lun = 3,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.sg_tablesize = SG_ALL,
|
||||
|
||||
/*
|
||||
* The RISC allows for each command to transfer (2^32-1) bytes of data,
|
||||
* which equates to 0x800000 sectors.
|
||||
*/
|
||||
.max_sectors = 0xFFFF,
|
||||
.shost_attrs = qla2x00_host_attrs,
|
||||
};
|
||||
|
||||
struct scsi_host_template qla24xx_driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = QLA2XXX_DRIVER_NAME,
|
||||
.queuecommand = qla24xx_queuecommand,
|
||||
.queuecommand = qla2xxx_queuecommand,
|
||||
|
||||
.eh_abort_handler = qla2xxx_eh_abort,
|
||||
.eh_device_reset_handler = qla2xxx_eh_device_reset,
|
||||
@@ -430,73 +396,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
|
||||
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
|
||||
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
srb_t *sp;
|
||||
int rval;
|
||||
|
||||
if (unlikely(pci_channel_offline(ha->pdev))) {
|
||||
cmd->result = DID_REQUEUE << 16;
|
||||
goto qc_fail_command;
|
||||
}
|
||||
|
||||
rval = fc_remote_port_chkready(rport);
|
||||
if (rval) {
|
||||
cmd->result = rval;
|
||||
goto qc_fail_command;
|
||||
}
|
||||
|
||||
/* Close window on fcport/rport state-transitioning. */
|
||||
if (fcport->drport)
|
||||
goto qc_target_busy;
|
||||
|
||||
if (atomic_read(&fcport->state) != FCS_ONLINE) {
|
||||
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
|
||||
atomic_read(&vha->loop_state) == LOOP_DEAD) {
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
goto qc_fail_command;
|
||||
}
|
||||
goto qc_target_busy;
|
||||
}
|
||||
|
||||
spin_unlock_irq(vha->host->host_lock);
|
||||
|
||||
sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
|
||||
if (!sp)
|
||||
goto qc_host_busy_lock;
|
||||
|
||||
rval = ha->isp_ops->start_scsi(sp);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qc_host_busy_free_sp;
|
||||
|
||||
spin_lock_irq(vha->host->host_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
qc_host_busy_free_sp:
|
||||
qla2x00_sp_free_dma(sp);
|
||||
mempool_free(sp, ha->srb_mempool);
|
||||
|
||||
qc_host_busy_lock:
|
||||
spin_lock_irq(vha->host->host_lock);
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
qc_target_busy:
|
||||
return SCSI_MLQUEUE_TARGET_BUSY;
|
||||
|
||||
qc_fail_command:
|
||||
done(cmd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
|
||||
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
|
||||
@@ -507,7 +407,10 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
int rval;
|
||||
|
||||
if (unlikely(pci_channel_offline(ha->pdev))) {
|
||||
cmd->result = DID_REQUEUE << 16;
|
||||
if (ha->pdev->error_state == pci_channel_io_frozen)
|
||||
cmd->result = DID_REQUEUE << 16;
|
||||
else
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
goto qc24_fail_command;
|
||||
}
|
||||
|
||||
@@ -635,6 +538,34 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
|
||||
return (return_status);
|
||||
}
|
||||
|
||||
int
|
||||
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
|
||||
{
|
||||
int return_status;
|
||||
unsigned long wait_reset;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
|
||||
while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
|
||||
test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
|
||||
ha->dpc_active) && time_before(jiffies, wait_reset)) {
|
||||
|
||||
msleep(1000);
|
||||
|
||||
if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
|
||||
ha->flags.chip_reset_done)
|
||||
break;
|
||||
}
|
||||
if (ha->flags.chip_reset_done)
|
||||
return_status = QLA_SUCCESS;
|
||||
else
|
||||
return_status = QLA_FUNCTION_FAILED;
|
||||
|
||||
return return_status;
|
||||
}
|
||||
|
||||
/*
|
||||
* qla2x00_wait_for_loop_ready
|
||||
* Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
|
||||
@@ -1163,7 +1094,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
|
||||
continue;
|
||||
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
|
||||
sp = req->outstanding_cmds[cnt];
|
||||
if (sp && sp->fcport->vha == vha) {
|
||||
if (sp) {
|
||||
req->outstanding_cmds[cnt] = NULL;
|
||||
sp->cmd->result = res;
|
||||
qla2x00_sp_compl(ha, sp);
|
||||
@@ -1351,9 +1282,6 @@ static struct isp_operations qla2100_isp_ops = {
|
||||
.write_optrom = qla2x00_write_optrom_data,
|
||||
.get_flash_version = qla2x00_get_flash_version,
|
||||
.start_scsi = qla2x00_start_scsi,
|
||||
.wrt_req_reg = NULL,
|
||||
.wrt_rsp_reg = NULL,
|
||||
.rd_req_reg = NULL,
|
||||
};
|
||||
|
||||
static struct isp_operations qla2300_isp_ops = {
|
||||
@@ -1389,9 +1317,6 @@ static struct isp_operations qla2300_isp_ops = {
|
||||
.write_optrom = qla2x00_write_optrom_data,
|
||||
.get_flash_version = qla2x00_get_flash_version,
|
||||
.start_scsi = qla2x00_start_scsi,
|
||||
.wrt_req_reg = NULL,
|
||||
.wrt_rsp_reg = NULL,
|
||||
.rd_req_reg = NULL,
|
||||
};
|
||||
|
||||
static struct isp_operations qla24xx_isp_ops = {
|
||||
@@ -1427,9 +1352,6 @@ static struct isp_operations qla24xx_isp_ops = {
|
||||
.write_optrom = qla24xx_write_optrom_data,
|
||||
.get_flash_version = qla24xx_get_flash_version,
|
||||
.start_scsi = qla24xx_start_scsi,
|
||||
.wrt_req_reg = qla24xx_wrt_req_reg,
|
||||
.wrt_rsp_reg = qla24xx_wrt_rsp_reg,
|
||||
.rd_req_reg = qla24xx_rd_req_reg,
|
||||
};
|
||||
|
||||
static struct isp_operations qla25xx_isp_ops = {
|
||||
@@ -1465,9 +1387,6 @@ static struct isp_operations qla25xx_isp_ops = {
|
||||
.write_optrom = qla24xx_write_optrom_data,
|
||||
.get_flash_version = qla24xx_get_flash_version,
|
||||
.start_scsi = qla24xx_start_scsi,
|
||||
.wrt_req_reg = qla24xx_wrt_req_reg,
|
||||
.wrt_rsp_reg = qla24xx_wrt_rsp_reg,
|
||||
.rd_req_reg = qla24xx_rd_req_reg,
|
||||
};
|
||||
|
||||
static struct isp_operations qla81xx_isp_ops = {
|
||||
@@ -1493,8 +1412,8 @@ static struct isp_operations qla81xx_isp_ops = {
|
||||
.build_iocbs = NULL,
|
||||
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
||||
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
||||
.read_nvram = qla25xx_read_nvram_data,
|
||||
.write_nvram = qla25xx_write_nvram_data,
|
||||
.read_nvram = NULL,
|
||||
.write_nvram = NULL,
|
||||
.fw_dump = qla81xx_fw_dump,
|
||||
.beacon_on = qla24xx_beacon_on,
|
||||
.beacon_off = qla24xx_beacon_off,
|
||||
@@ -1503,9 +1422,6 @@ static struct isp_operations qla81xx_isp_ops = {
|
||||
.write_optrom = qla24xx_write_optrom_data,
|
||||
.get_flash_version = qla24xx_get_flash_version,
|
||||
.start_scsi = qla24xx_start_scsi,
|
||||
.wrt_req_reg = qla24xx_wrt_req_reg,
|
||||
.wrt_rsp_reg = qla24xx_wrt_rsp_reg,
|
||||
.rd_req_reg = qla24xx_rd_req_reg,
|
||||
};
|
||||
|
||||
static inline void
|
||||
@@ -1727,7 +1643,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
struct rsp_que *rsp = NULL;
|
||||
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
|
||||
sht = &qla2x00_driver_template;
|
||||
sht = &qla2xxx_driver_template;
|
||||
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
|
||||
@@ -1736,7 +1652,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
sht = &qla24xx_driver_template;
|
||||
mem_only = 1;
|
||||
}
|
||||
|
||||
@@ -1927,10 +1842,16 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
ha->rsp_q_map[0] = rsp;
|
||||
ha->req_q_map[0] = req;
|
||||
|
||||
/* FWI2-capable only. */
|
||||
req->req_q_in = &ha->iobase->isp24.req_q_in;
|
||||
req->req_q_out = &ha->iobase->isp24.req_q_out;
|
||||
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
|
||||
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
|
||||
if (ha->mqenable) {
|
||||
ha->isp_ops->wrt_req_reg = qla25xx_wrt_req_reg;
|
||||
ha->isp_ops->wrt_rsp_reg = qla25xx_wrt_rsp_reg;
|
||||
ha->isp_ops->rd_req_reg = qla25xx_rd_req_reg;
|
||||
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
|
||||
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
|
||||
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
|
||||
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
|
||||
}
|
||||
|
||||
if (qla2x00_initialize_adapter(base_vha)) {
|
||||
@@ -2000,6 +1921,16 @@ probe_init_failed:
|
||||
ha->max_queues = 0;
|
||||
|
||||
probe_failed:
|
||||
if (base_vha->timer_active)
|
||||
qla2x00_stop_timer(base_vha);
|
||||
base_vha->flags.online = 0;
|
||||
if (ha->dpc_thread) {
|
||||
struct task_struct *t = ha->dpc_thread;
|
||||
|
||||
ha->dpc_thread = NULL;
|
||||
kthread_stop(t);
|
||||
}
|
||||
|
||||
qla2x00_free_device(base_vha);
|
||||
|
||||
scsi_host_put(base_vha->host);
|
||||
@@ -2033,10 +1964,30 @@ qla2x00_remove_one(struct pci_dev *pdev)
|
||||
|
||||
set_bit(UNLOADING, &base_vha->dpc_flags);
|
||||
|
||||
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
|
||||
|
||||
qla2x00_dfs_remove(base_vha);
|
||||
|
||||
qla84xx_put_chip(base_vha);
|
||||
|
||||
/* Disable timer */
|
||||
if (base_vha->timer_active)
|
||||
qla2x00_stop_timer(base_vha);
|
||||
|
||||
base_vha->flags.online = 0;
|
||||
|
||||
/* Kill the kernel thread for this host */
|
||||
if (ha->dpc_thread) {
|
||||
struct task_struct *t = ha->dpc_thread;
|
||||
|
||||
/*
|
||||
* qla2xxx_wake_dpc checks for ->dpc_thread
|
||||
* so we need to zero it out.
|
||||
*/
|
||||
ha->dpc_thread = NULL;
|
||||
kthread_stop(t);
|
||||
}
|
||||
|
||||
qla2x00_free_sysfs_attr(base_vha);
|
||||
|
||||
fc_remove_host(base_vha->host);
|
||||
@@ -2065,25 +2016,6 @@ static void
|
||||
qla2x00_free_device(scsi_qla_host_t *vha)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
|
||||
|
||||
/* Disable timer */
|
||||
if (vha->timer_active)
|
||||
qla2x00_stop_timer(vha);
|
||||
|
||||
vha->flags.online = 0;
|
||||
|
||||
/* Kill the kernel thread for this host */
|
||||
if (ha->dpc_thread) {
|
||||
struct task_struct *t = ha->dpc_thread;
|
||||
|
||||
/*
|
||||
* qla2xxx_wake_dpc checks for ->dpc_thread
|
||||
* so we need to zero it out.
|
||||
*/
|
||||
ha->dpc_thread = NULL;
|
||||
kthread_stop(t);
|
||||
}
|
||||
|
||||
if (ha->flags.fce_enabled)
|
||||
qla2x00_disable_fce_trace(vha, NULL, NULL);
|
||||
@@ -2313,9 +2245,19 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
|
||||
} else
|
||||
ha->npiv_info = NULL;
|
||||
|
||||
/* Get consistent memory allocated for EX-INIT-CB. */
|
||||
if (IS_QLA81XX(ha)) {
|
||||
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
||||
&ha->ex_init_cb_dma);
|
||||
if (!ha->ex_init_cb)
|
||||
goto fail_ex_init_cb;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&ha->vp_list);
|
||||
return 1;
|
||||
|
||||
fail_ex_init_cb:
|
||||
kfree(ha->npiv_info);
|
||||
fail_npiv_info:
|
||||
dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
|
||||
sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
|
||||
@@ -2398,18 +2340,22 @@ qla2x00_mem_free(struct qla_hw_data *ha)
|
||||
if (ha->sfp_data)
|
||||
dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
|
||||
|
||||
if (ha->edc_data)
|
||||
dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
|
||||
|
||||
if (ha->ms_iocb)
|
||||
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
|
||||
|
||||
if (ha->ex_init_cb)
|
||||
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
|
||||
|
||||
if (ha->s_dma_pool)
|
||||
dma_pool_destroy(ha->s_dma_pool);
|
||||
|
||||
|
||||
if (ha->gid_list)
|
||||
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
|
||||
ha->gid_list_dma);
|
||||
|
||||
|
||||
if (ha->init_cb)
|
||||
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
|
||||
ha->init_cb, ha->init_cb_dma);
|
||||
@@ -2428,6 +2374,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
|
||||
ha->ms_iocb_dma = 0;
|
||||
ha->init_cb = NULL;
|
||||
ha->init_cb_dma = 0;
|
||||
ha->ex_init_cb = NULL;
|
||||
ha->ex_init_cb_dma = 0;
|
||||
|
||||
ha->s_dma_pool = NULL;
|
||||
|
||||
@@ -2914,19 +2862,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock,
|
||||
cpu_flags);
|
||||
}
|
||||
set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
|
||||
start_dpc++;
|
||||
}
|
||||
|
||||
/* if the loop has been down for 4 minutes, reinit adapter */
|
||||
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
|
||||
DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
|
||||
"restarting queues.\n",
|
||||
vha->host_no));
|
||||
|
||||
set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
|
||||
start_dpc++;
|
||||
|
||||
if (!(vha->device_flags & DFLG_NO_CABLE) &&
|
||||
!vha->vp_idx) {
|
||||
DEBUG(printk("scsi(%ld): Loop down - "
|
||||
@@ -3053,6 +2993,8 @@ qla2x00_release_firmware(void)
|
||||
static pci_ers_result_t
|
||||
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
|
||||
{
|
||||
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
|
||||
|
||||
switch (state) {
|
||||
case pci_channel_io_normal:
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
@@ -3060,7 +3002,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
|
||||
pci_disable_device(pdev);
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
case pci_channel_io_perm_failure:
|
||||
qla2x00_remove_one(pdev);
|
||||
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
}
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
|
@@ -612,8 +612,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
|
||||
|
||||
/* Good data. Use specified location. */
|
||||
loc = locations[1];
|
||||
*start = le16_to_cpu(fltl->start_hi) << 16 |
|
||||
le16_to_cpu(fltl->start_lo);
|
||||
*start = (le16_to_cpu(fltl->start_hi) << 16 |
|
||||
le16_to_cpu(fltl->start_lo)) >> 2;
|
||||
end:
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
|
||||
return QLA_SUCCESS;
|
||||
@@ -629,6 +629,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
{ FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
|
||||
const uint32_t def_vpd_nvram[] =
|
||||
{ FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
|
||||
const uint32_t def_vpd0[] =
|
||||
{ 0, 0, FA_VPD0_ADDR_81 };
|
||||
const uint32_t def_vpd1[] =
|
||||
{ 0, 0, FA_VPD1_ADDR_81 };
|
||||
const uint32_t def_nvram0[] =
|
||||
{ 0, 0, FA_NVRAM0_ADDR_81 };
|
||||
const uint32_t def_nvram1[] =
|
||||
{ 0, 0, FA_NVRAM1_ADDR_81 };
|
||||
const uint32_t def_fdt[] =
|
||||
{ FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
|
||||
FA_FLASH_DESCR_ADDR_81 };
|
||||
@@ -693,6 +701,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
break;
|
||||
case FLT_REG_VPD_0:
|
||||
ha->flt_region_vpd_nvram = start;
|
||||
if (!(PCI_FUNC(ha->pdev->devfn) & 1))
|
||||
ha->flt_region_vpd = start;
|
||||
break;
|
||||
case FLT_REG_VPD_1:
|
||||
if (PCI_FUNC(ha->pdev->devfn) & 1)
|
||||
ha->flt_region_vpd = start;
|
||||
break;
|
||||
case FLT_REG_NVRAM_0:
|
||||
if (!(PCI_FUNC(ha->pdev->devfn) & 1))
|
||||
ha->flt_region_nvram = start;
|
||||
break;
|
||||
case FLT_REG_NVRAM_1:
|
||||
if (PCI_FUNC(ha->pdev->devfn) & 1)
|
||||
ha->flt_region_nvram = start;
|
||||
break;
|
||||
case FLT_REG_FDT:
|
||||
ha->flt_region_fdt = start;
|
||||
@@ -722,13 +744,18 @@ no_flash_data:
|
||||
ha->flt_region_fw = def_fw[def];
|
||||
ha->flt_region_boot = def_boot[def];
|
||||
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
|
||||
ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
|
||||
def_vpd0[def]: def_vpd1[def];
|
||||
ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
|
||||
def_nvram0[def]: def_nvram1[def];
|
||||
ha->flt_region_fdt = def_fdt[def];
|
||||
ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
|
||||
def_npiv_conf0[def]: def_npiv_conf1[def];
|
||||
done:
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
|
||||
"vpd_nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x.\n", loc,
|
||||
ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
|
||||
"vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
|
||||
"npiv=0x%x.\n", loc, ha->flt_region_boot, ha->flt_region_fw,
|
||||
ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
|
||||
ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf));
|
||||
}
|
||||
|
||||
@@ -931,31 +958,41 @@ done:
|
||||
ha->npiv_info = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
qla24xx_unprotect_flash(struct qla_hw_data *ha)
|
||||
static int
|
||||
qla24xx_unprotect_flash(scsi_qla_host_t *vha)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
|
||||
if (ha->flags.fac_supported)
|
||||
return qla81xx_fac_do_write_enable(vha, 1);
|
||||
|
||||
/* Enable flash write. */
|
||||
WRT_REG_DWORD(®->ctrl_status,
|
||||
RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE);
|
||||
RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */
|
||||
|
||||
if (!ha->fdt_wrt_disable)
|
||||
return;
|
||||
goto done;
|
||||
|
||||
/* Disable flash write-protection, first clear SR protection bit */
|
||||
qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
|
||||
/* Then write zero again to clear remaining SR bits.*/
|
||||
qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
|
||||
done:
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
static void
|
||||
qla24xx_protect_flash(struct qla_hw_data *ha)
|
||||
static int
|
||||
qla24xx_protect_flash(scsi_qla_host_t *vha)
|
||||
{
|
||||
uint32_t cnt;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
|
||||
if (ha->flags.fac_supported)
|
||||
return qla81xx_fac_do_write_enable(vha, 0);
|
||||
|
||||
if (!ha->fdt_wrt_disable)
|
||||
goto skip_wrt_protect;
|
||||
|
||||
@@ -973,6 +1010,26 @@ skip_wrt_protect:
|
||||
WRT_REG_DWORD(®->ctrl_status,
|
||||
RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE);
|
||||
RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */
|
||||
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
static int
|
||||
qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint32_t start, finish;
|
||||
|
||||
if (ha->flags.fac_supported) {
|
||||
start = fdata >> 2;
|
||||
finish = start + (ha->fdt_block_size >> 2) - 1;
|
||||
return qla81xx_fac_erase_sector(vha, flash_data_addr(ha,
|
||||
start), flash_data_addr(ha, finish));
|
||||
}
|
||||
|
||||
return qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd,
|
||||
(fdata & 0xff00) | ((fdata << 16) & 0xff0000) |
|
||||
((fdata >> 16) & 0xff));
|
||||
}
|
||||
|
||||
static int
|
||||
@@ -987,8 +1044,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
void *optrom = NULL;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
ret = QLA_SUCCESS;
|
||||
|
||||
/* Prepare burst-capable write on supported ISPs. */
|
||||
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
|
||||
dwords > OPTROM_BURST_DWORDS) {
|
||||
@@ -1004,7 +1059,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
rest_addr = (ha->fdt_block_size >> 2) - 1;
|
||||
sec_mask = ~rest_addr;
|
||||
|
||||
qla24xx_unprotect_flash(ha);
|
||||
ret = qla24xx_unprotect_flash(vha);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to unprotect flash for update.\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
|
||||
fdata = (faddr & sec_mask) << 2;
|
||||
@@ -1017,9 +1077,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
ha->fdt_unprotect_sec_cmd,
|
||||
(fdata & 0xff00) | ((fdata << 16) &
|
||||
0xff0000) | ((fdata >> 16) & 0xff));
|
||||
ret = qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd,
|
||||
(fdata & 0xff00) |((fdata << 16) &
|
||||
0xff0000) | ((fdata >> 16) & 0xff));
|
||||
ret = qla24xx_erase_sector(vha, fdata);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG9(qla_printk("Unable to erase sector: "
|
||||
"address=%x.\n", faddr));
|
||||
@@ -1073,8 +1131,11 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
0xff0000) | ((fdata >> 16) & 0xff));
|
||||
}
|
||||
|
||||
qla24xx_protect_flash(ha);
|
||||
|
||||
ret = qla24xx_protect_flash(vha);
|
||||
if (ret != QLA_SUCCESS)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to protect flash after update.\n");
|
||||
done:
|
||||
if (optrom)
|
||||
dma_free_coherent(&ha->pdev->dev,
|
||||
OPTROM_BURST_SIZE, optrom, optrom_dma);
|
||||
@@ -1915,7 +1976,7 @@ qla2x00_resume_hba(struct scsi_qla_host *vha)
|
||||
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
qla2x00_wait_for_hba_online(vha);
|
||||
qla2x00_wait_for_chip_reset(vha);
|
||||
scsi_unblock_requests(vha->host);
|
||||
}
|
||||
|
||||
@@ -2206,11 +2267,7 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
|
||||
rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
|
||||
length >> 2);
|
||||
|
||||
/* Resume HBA -- RISC reset needed. */
|
||||
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
qla2x00_wait_for_hba_online(vha);
|
||||
scsi_unblock_requests(vha->host);
|
||||
|
||||
return rval;
|
||||
@@ -2518,7 +2575,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
dcode = mbuf;
|
||||
|
||||
/* Begin with first PCI expansion ROM header. */
|
||||
pcihdr = ha->flt_region_boot;
|
||||
pcihdr = ha->flt_region_boot << 2;
|
||||
last_image = 1;
|
||||
do {
|
||||
/* Verify PCI expansion ROM header. */
|
||||
|
@@ -7,9 +7,9 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.03.00-k4"
#define QLA2XXX_VERSION "8.03.01-k1"

#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_PATCH_VER 1
#define QLA_DRIVER_BETA_VER 0
||||
|
@@ -28,6 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/firmware.h>

#include <asm/byteorder.h>

@@ -53,8 +54,6 @@

#define DEFAULT_LOOP_COUNT 10000

#include "qlogicpti_asm.c"

static struct qlogicpti *qptichain = NULL;
static DEFINE_SPINLOCK(qptichain_lock);
||||
|
||||
@@ -465,16 +464,32 @@ static int qlogicpti_reset_hardware(struct Scsi_Host *host)
|
||||
|
||||
static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const char fwname[] = "qlogic/isp1000.bin";
|
||||
const __le16 *fw_data;
|
||||
struct Scsi_Host *host = qpti->qhost;
|
||||
unsigned short csum = 0;
|
||||
unsigned short param[6];
|
||||
unsigned short *risc_code, risc_code_addr, risc_code_length;
|
||||
unsigned short risc_code_addr, risc_code_length;
|
||||
int err;
|
||||
unsigned long flags;
|
||||
int i, timeout;
|
||||
|
||||
risc_code = &sbus_risc_code01[0];
|
||||
err = request_firmware(&fw, fwname, &qpti->op->dev);
|
||||
if (err) {
|
||||
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
|
||||
fwname, err);
|
||||
return err;
|
||||
}
|
||||
if (fw->size % 2) {
|
||||
printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
|
||||
fw->size, fwname);
|
||||
err = -EINVAL;
|
||||
goto outfirm;
|
||||
}
|
||||
fw_data = (const __le16 *)&fw->data[0];
|
||||
risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */
|
||||
risc_code_length = sbus_risc_code_length01;
|
||||
risc_code_length = fw->size / 2;
|
||||
|
||||
spin_lock_irqsave(host->host_lock, flags);
|
||||
|
||||
@@ -482,12 +497,12 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
* afterwards via the mailbox commands.
|
||||
*/
|
||||
for (i = 0; i < risc_code_length; i++)
|
||||
csum += risc_code[i];
|
||||
csum += __le16_to_cpu(fw_data[i]);
|
||||
if (csum) {
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
|
||||
qpti->qpti_id);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
|
||||
sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
|
||||
@@ -496,9 +511,9 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
|
||||
udelay(20);
|
||||
if (!timeout) {
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
|
||||
@@ -536,21 +551,21 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
if (qlogicpti_mbox_command(qpti, param, 1)) {
|
||||
printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
|
||||
qpti->qpti_id);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Load it up.. */
|
||||
for (i = 0; i < risc_code_length; i++) {
|
||||
param[0] = MBOX_WRITE_RAM_WORD;
|
||||
param[1] = risc_code_addr + i;
|
||||
param[2] = risc_code[i];
|
||||
param[2] = __le16_to_cpu(fw_data[i]);
|
||||
if (qlogicpti_mbox_command(qpti, param, 1) ||
|
||||
param[0] != MBOX_COMMAND_COMPLETE) {
|
||||
printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
|
||||
qpti->qpti_id);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -569,8 +584,8 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
(param[0] != MBOX_COMMAND_COMPLETE)) {
|
||||
printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
|
||||
qpti->qpti_id);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Start using newly downloaded firmware. */
|
||||
@@ -583,8 +598,8 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
(param[0] != MBOX_COMMAND_COMPLETE)) {
|
||||
printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
|
||||
qpti->qpti_id);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Snag the major and minor revisions from the result. */
|
||||
@@ -599,8 +614,8 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
(param[0] != MBOX_COMMAND_COMPLETE)) {
|
||||
printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
|
||||
qpti->qpti_id);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
return 1;
|
||||
err = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (qpti->is_pti != 0) {
|
||||
@@ -616,8 +631,11 @@ static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
|
||||
qlogicpti_mbox_command(qpti, param, 1);
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
return 0;
|
||||
outfirm:
|
||||
release_firmware(fw);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
|
||||
@@ -1458,6 +1476,7 @@ MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");

module_init(qpti_init);
module_exit(qpti_exit);
||||
|
File diff suppressed because it is too large
@@ -169,12 +169,10 @@ scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;

cmd = kmem_cache_alloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
if (!cmd)
return NULL;

memset(cmd, 0, sizeof(*cmd));

cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
gfp_mask | pool->gfp_mask);
if (!cmd->sense_buffer) {
||||
|
@@ -791,7 +791,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
|
||||
"%d bytes done.\n",
|
||||
req->nr_sectors, good_bytes));
|
||||
|
||||
/* A number of bytes were successfully read. If there
|
||||
/*
|
||||
* Recovered errors need reporting, but they're always treated
|
||||
* as success, so fiddle the result code here. For BLOCK_PC
|
||||
* we already took a copy of the original into rq->errors which
|
||||
* is what gets returned to the user
|
||||
*/
|
||||
if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
|
||||
if (!(req->cmd_flags & REQ_QUIET))
|
||||
scsi_print_sense("", cmd);
|
||||
result = 0;
|
||||
/* BLOCK_PC may have set error */
|
||||
error = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* A number of bytes were successfully read. If there
|
||||
* are leftovers and there is some kind of error
|
||||
* (result != 0), retry the rest.
|
||||
*/
|
||||
|
@@ -1051,12 +1051,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
|
||||
good_bytes = sd_completed_bytes(SCpnt);
|
||||
break;
|
||||
case RECOVERED_ERROR:
|
||||
/* Inform the user, but make sure that it's not treated
|
||||
* as a hard error.
|
||||
*/
|
||||
scsi_print_sense("sd", SCpnt);
|
||||
SCpnt->result = 0;
|
||||
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
good_bytes = scsi_bufflen(SCpnt);
|
||||
break;
|
||||
case NO_SENSE:
|
||||
|
@@ -264,6 +264,7 @@ struct ses_host_edev {
|
||||
struct enclosure_device *edev;
|
||||
};
|
||||
|
||||
#if 0
|
||||
int ses_match_host(struct enclosure_device *edev, void *data)
|
||||
{
|
||||
struct ses_host_edev *sed = data;
|
||||
@@ -280,6 +281,7 @@ int ses_match_host(struct enclosure_device *edev, void *data)
|
||||
sed->edev = edev;
|
||||
return 1;
|
||||
}
|
||||
#endif /* 0 */
|
||||
|
||||
static void ses_process_descriptor(struct enclosure_component *ecomp,
|
||||
unsigned char *desc)
|
||||
|
@@ -1312,8 +1312,10 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
kref_put(&sfp->f_ref, sg_remove_sfp);
} else
execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
} else {
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
}

static struct file_operations sg_fops = {
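This hunk (and the matching sg_remove_sfp() hunk further down) replaces execute_in_process_context() with an explicitly initialized work item handed to schedule_work(), so the user-context part of the teardown runs later on the shared kernel workqueue. Below is a minimal, self-contained sketch of that INIT_WORK()/schedule_work() pattern; the demo_* names are illustrative and not part of this commit.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* Runs later in process context, where sleeping is allowed. */
	pr_info("deferred work executed\n");
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	schedule_work(&demo_work);	/* queue on the shared kernel workqueue */
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure the work item has finished before the module goes away. */
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");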
@@ -1656,10 +1658,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
md->null_mapped = hp->dxferp ? 0 : 1;
}

if (iov_count)
res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
hp->dxfer_len, GFP_ATOMIC);
else
if (iov_count) {
int len, size = sizeof(struct sg_iovec) * iov_count;
struct iovec *iov;

iov = kmalloc(size, GFP_ATOMIC);
if (!iov)
return -ENOMEM;

if (copy_from_user(iov, hp->dxferp, size)) {
kfree(iov);
return -EFAULT;
}

len = iov_length(iov, iov_count);
if (hp->dxfer_len < len) {
iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
len = hp->dxfer_len;
}

res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
iov_count,
len, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
@@ -2079,7 +2101,8 @@ static void sg_remove_sfp(struct kref *kref)
write_unlock_irqrestore(&sg_index_lock, iflags);
wake_up_interruptible(&sdp->o_excl_wait);

execute_in_process_context(sg_remove_sfp_usercontext, &sfp->ew);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
}

static int
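The sg_start_req() hunk above copies the user-supplied iovec into kernel memory, measures it with iov_length(), and trims it with iov_shorten() when it describes more than dxfer_len bytes before mapping it with blk_rq_map_user_iov(). The user-space sketch below mirrors only that measure-and-clamp step; the helper names are illustrative and the real kernel helpers may differ in detail.

#include <stdio.h>
#include <sys/uio.h>

/* Total number of bytes described by an iovec array (mirrors iov_length()). */
static size_t total_len(const struct iovec *iov, int count)
{
	size_t len = 0;
	int i;

	for (i = 0; i < count; i++)
		len += iov[i].iov_len;
	return len;
}

/* Shrink the array so it describes at most 'cap' bytes (mirrors iov_shorten()). */
static int clamp_iov(struct iovec *iov, int count, size_t cap)
{
	int i;

	for (i = 0; i < count && cap > 0; i++) {
		if (iov[i].iov_len > cap)
			iov[i].iov_len = cap;
		cap -= iov[i].iov_len;
	}
	return i;	/* new element count */
}

int main(void)
{
	char a[8], b[8];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	int count = clamp_iov(iov, 2, 10);

	/* Prints "segments: 2, bytes: 10": the second segment was shortened. */
	printf("segments: %d, bytes: %zu\n", count, total_len(iov, count));
	return 0;
}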
@@ -309,15 +309,6 @@ static int sr_done(struct scsi_cmnd *SCpnt)
break;

case RECOVERED_ERROR:

/*
* An error occured, but it recovered. Inform the
* user, but make sure that it's not treated as a
* hard error.
*/
scsi_print_sense("sr", SCpnt);
SCpnt->result = 0;
SCpnt->sense_buffer[0] = 0x0;
good_bytes = this_count;
break;
File diff suppressed because it is too large
@@ -234,7 +234,7 @@ static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host)
/*
* Set the status field of a CAM CCB.
*/
static __inline void
static inline void
sym_set_cam_status(struct scsi_cmnd *cmd, int status)
{
cmd->result &= ~(0xff << 16);
@@ -244,7 +244,7 @@ sym_set_cam_status(struct scsi_cmnd *cmd, int status)
/*
* Get the status field of a CAM CCB.
*/
static __inline int
static inline int
sym_get_cam_status(struct scsi_cmnd *cmd)
{
return host_byte(cmd->result);
@@ -253,7 +253,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
/*
* Build CAM result for a successful IO and for a failed IO.
*/
static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
{
scsi_set_resid(cmd, resid);
cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));

@@ -602,7 +602,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
/*
* Set initial io register bits from burst code.
*/
static __inline void sym_init_burst(struct sym_hcb *np, u_char bc)
static inline void sym_init_burst(struct sym_hcb *np, u_char bc)
{
np->rv_ctest4 &= ~0x80;
np->rv_dmode &= ~(0x3 << 6);

@@ -1096,7 +1096,7 @@ do { \
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
#define DMA_DAC_MASK DMA_64BIT_MASK
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
static __inline void
static inline void
sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
{
u32 h = (badd>>32);
@@ -1201,7 +1201,7 @@ dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);

#define sym_m_pool_match(mp_id1, mp_id2) (mp_id1 == mp_id2)

static __inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
void *vaddr = NULL;
dma_addr_t baddr = 0;
@@ -1215,7 +1215,7 @@ static __inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
return vaddr;
}

static __inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
vbp->baddr);

@@ -262,7 +262,7 @@ static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
#endif

/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
static __inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
{
m_pool_p mp;
for (mp = mp0.next;

@@ -52,17 +52,17 @@ typedef struct sym_quehead {
(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
} while (0)

static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
static inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
{
return (head->flink == head) ? 0 : head->flink;
}

static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
static inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
{
return (head->blink == head) ? 0 : head->blink;
}

static __inline void __sym_que_add(struct sym_quehead * new,
static inline void __sym_que_add(struct sym_quehead * new,
struct sym_quehead * blink,
struct sym_quehead * flink)
{
@@ -72,19 +72,19 @@ static __inline void __sym_que_add(struct sym_quehead * new,
blink->flink = new;
}

static __inline void __sym_que_del(struct sym_quehead * blink,
static inline void __sym_que_del(struct sym_quehead * blink,
struct sym_quehead * flink)
{
flink->blink = blink;
blink->flink = flink;
}

static __inline int sym_que_empty(struct sym_quehead *head)
static inline int sym_que_empty(struct sym_quehead *head)
{
return head->flink == head;
}

static __inline void sym_que_splice(struct sym_quehead *list,
static inline void sym_que_splice(struct sym_quehead *list,
struct sym_quehead *head)
{
struct sym_quehead *first = list->flink;
@@ -101,7 +101,7 @@ static __inline void sym_que_splice(struct sym_quehead *list,
}
}

static __inline void sym_que_move(struct sym_quehead *orig,
static inline void sym_que_move(struct sym_quehead *orig,
struct sym_quehead *dest)
{
struct sym_quehead *first, *last;
@@ -129,7 +129,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,

#define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)

static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
{
struct sym_quehead *elem = head->flink;

@@ -142,7 +142,7 @@ static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)

#define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)

static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
{
struct sym_quehead *elem = head->blink;