Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
This commit is contained in:
8
drivers/scsi/aacraid/Makefile
Normal file
8
drivers/scsi/aacraid/Makefile
Normal file
@@ -0,0 +1,8 @@
|
||||
# Adaptec aacraid
|
||||
|
||||
obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
|
||||
|
||||
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
|
||||
dpcsup.o rx.o sa.o rkt.o
|
||||
|
||||
EXTRA_CFLAGS := -Idrivers/scsi
|
66
drivers/scsi/aacraid/README
Normal file
66
drivers/scsi/aacraid/README
Normal file
@@ -0,0 +1,66 @@
|
||||
AACRAID Driver for Linux (take two)
|
||||
|
||||
Introduction
|
||||
-------------------------
|
||||
The aacraid driver adds support for Adaptec (http://www.adaptec.com)
|
||||
RAID controllers. This is a major rewrite from the original
|
||||
Adaptec supplied driver. It has significantly cleaned up both the code
|
||||
and the running binary size (the module is less than half the size of
|
||||
the original).
|
||||
|
||||
Supported Cards/Chipsets
|
||||
-------------------------
|
||||
Adaptec 2020S
|
||||
Adaptec 2025S
|
||||
Adaptec 2120S
|
||||
Adaptec 2200S
|
||||
Adaptec 2230S
|
||||
Adaptec 2240S
|
||||
Adaptec 2410SA
|
||||
Adaptec 2610SA
|
||||
Adaptec 2810SA
|
||||
Adaptec 21610SA
|
||||
Adaptec 3230S
|
||||
Adaptec 3240S
|
||||
Adaptec 4000SAS
|
||||
Adaptec 4005SAS
|
||||
Adaptec 4800SAS
|
||||
Adaptec 4805SAS
|
||||
Adaptec 5400S
|
||||
Dell PERC 2 Quad Channel
|
||||
Dell PERC 2/Si
|
||||
Dell PERC 3/Si
|
||||
Dell PERC 3/Di
|
||||
Dell CERC 2
|
||||
HP NetRAID-4M
|
||||
Legend S220
|
||||
Legend S230
|
||||
|
||||
People
|
||||
-------------------------
|
||||
Alan Cox <alan@redhat.com>
|
||||
Christoph Hellwig <hch@infradead.org> (updates for new-style PCI probing and SCSI host registration,
|
||||
small cleanups/fixes)
|
||||
Matt Domsch <matt_domsch@dell.com> (revision ioctl, adapter messages)
|
||||
Deanna Bonds (non-DASD support, PAE fibs and 64 bit, added new adaptec controllers
|
||||
added new ioctls, changed scsi interface to use new error handler,
|
||||
increased the number of fibs and outstanding commands to a container)
|
||||
|
||||
(fixed 64bit and 64G memory model, changed confusing naming convention
|
||||
where fibs that go to the hardware are consistently called hw_fibs and
|
||||
not just fibs like the name of the driver tracking structure)
|
||||
Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
|
||||
|
||||
Original Driver
|
||||
-------------------------
|
||||
Adaptec Unix OEM Product Group
|
||||
|
||||
Mailing List
|
||||
-------------------------
|
||||
linux-scsi@vger.kernel.org (Interested parties troll here)
|
||||
Also note this is very different to Brian's original driver
|
||||
so don't expect him to support it.
|
||||
Adaptec does support this driver. Contact either tech support or Mark Salyzyn.
|
||||
|
||||
Original by Brian Boerner February 2001
|
||||
Rewritten by Alan Cox, November 2001
|
6
drivers/scsi/aacraid/TODO
Normal file
6
drivers/scsi/aacraid/TODO
Normal file
@@ -0,0 +1,6 @@
|
||||
o Testing
|
||||
o More testing
|
||||
o Feature request: display the firmware/bios/etc revisions in the
|
||||
/proc info
|
||||
o Drop irq_mask, basically unused
|
||||
o I/O size increase
|
2037
drivers/scsi/aacraid/aachba.c
Normal file
2037
drivers/scsi/aacraid/aachba.c
Normal file
File diff soppresso perché troppo grande
Carica Diff
1623
drivers/scsi/aacraid/aacraid.h
Normal file
1623
drivers/scsi/aacraid/aacraid.h
Normal file
File diff soppresso perché troppo grande
Carica Diff
683
drivers/scsi/aacraid/commctrl.c
Normal file
683
drivers/scsi/aacraid/commctrl.c
Normal file
@@ -0,0 +1,683 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* commctrl.c
|
||||
*
|
||||
* Abstract: Contains all routines for control of the AFA comm layer
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
/**
|
||||
* ioctl_send_fib - send a FIB from userspace
|
||||
* @dev: adapter is being processed
|
||||
* @arg: arguments to the ioctl call
|
||||
*
|
||||
* This routine sends a fib to the adapter on behalf of a user level
|
||||
* program.
|
||||
*/
|
||||
|
||||
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
|
||||
{
|
||||
struct hw_fib * kfib;
|
||||
struct fib *fibptr;
|
||||
|
||||
fibptr = fib_alloc(dev);
|
||||
if(fibptr == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
kfib = fibptr->hw_fib;
|
||||
/*
|
||||
* First copy in the header so that we can check the size field.
|
||||
*/
|
||||
if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
|
||||
fib_free(fibptr);
|
||||
return -EFAULT;
|
||||
}
|
||||
/*
|
||||
* Since we copy based on the fib header size, make sure that we
|
||||
* will not overrun the buffer when we copy the memory. Return
|
||||
* an error if we would.
|
||||
*/
|
||||
if (le16_to_cpu(kfib->header.Size) >
|
||||
sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
|
||||
fib_free(fibptr);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (copy_from_user(kfib, arg, le16_to_cpu(kfib->header.Size) +
|
||||
sizeof(struct aac_fibhdr))) {
|
||||
fib_free(fibptr);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (kfib->header.Command == cpu_to_le32(TakeABreakPt)) {
|
||||
aac_adapter_interrupt(dev);
|
||||
/*
|
||||
* Since we didn't really send a fib, zero out the state to allow
|
||||
* cleanup code not to assert.
|
||||
*/
|
||||
kfib->header.XferState = 0;
|
||||
} else {
|
||||
int retval = fib_send(kfib->header.Command, fibptr,
|
||||
le16_to_cpu(kfib->header.Size) , FsaNormal,
|
||||
1, 1, NULL, NULL);
|
||||
if (retval) {
|
||||
fib_free(fibptr);
|
||||
return retval;
|
||||
}
|
||||
if (fib_complete(fibptr) != 0) {
|
||||
fib_free(fibptr);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Make sure that the size returned by the adapter (which includes
|
||||
* the header) is less than or equal to the size of a fib, so we
|
||||
* don't corrupt application data. Then copy that size to the user
|
||||
* buffer. (Don't try to add the header information again, since it
|
||||
* was already included by the adapter.)
|
||||
*/
|
||||
|
||||
if (copy_to_user(arg, (void *)kfib, kfib->header.Size)) {
|
||||
fib_free(fibptr);
|
||||
return -EFAULT;
|
||||
}
|
||||
fib_free(fibptr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 *	open_getadapter_fib	-	Get the next fib
 *	@dev:	adapter the context is created for
 *	@arg:	user pointer that receives the 32 bit context handle
 *
 *	This routine will get the next Fib, if available, from the AdapterFibContext
 *	passed in from the user.
 *
 *	Allocates a new aac_fib_context, gives it a 32-bit handle unique on
 *	this adapter, links it onto dev->fib_list under dev->fib_lock, and
 *	copies the handle back to user space.  Returns 0 on success,
 *	-ENOMEM on allocation failure, -EFAULT if the handle cannot be
 *	copied out (note: the context stays on the list in that case).
 */

static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 *	better guarantee of uniqueness for the locked loop below.
		 *	Without the aid of a persistent history, this also helps
		 *	reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 *	Created locked: next_getadapter_fib() blocks on it until
		 *	a fib arrives and the producer ups it.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		/* Creation timestamp in seconds (jiffies/HZ), used for aging. */
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) — bump the handle and
				 * restart the scan from the head. */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
|
||||
|
||||
/**
 *	next_getadapter_fib	-	get the next fib
 *	@dev:	adapter to use
 *	@arg:	ioctl argument (user struct fib_ioctl: context handle,
 *		wait flag, and destination buffer pointer)
 *
 *	This routine will get the next Fib, if available, from the AdapterFibContext
 *	passed in from the user.
 *
 *	Returns 0 on success (one hw_fib copied to f.fib), -EFAULT on a bad
 *	user pointer, -EINVAL for an unknown/corrupt context, -EAGAIN if no
 *	fib is queued and f.wait is clear, -EINTR if the wait was interrupted.
 */

static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 *
	 *	NOTE(review): this walk happens before dev->fib_lock is taken
	 *	below — a concurrent close_getadapter_fib() could free the
	 *	context under us.  Confirm against the locking rules for
	 *	dev->fib_list.
	 */
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) {   /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	spin_lock_irqsave(&dev->fib_lock, flags);
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/* Drop the lock before touching user space. */
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib);
		kfree(fib);
		status = 0;
		/* Record last-use time in seconds for context aging. */
		fibctx->jiffies = jiffies/HZ;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (f.wait) {
			/* Sleep until the producer queues a fib and ups the
			 * semaphore, then re-check the list. */
			if(down_interruptible(&fibctx->wait_sem) < 0) {
				status = -EINTR;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	return status;
}
|
||||
|
||||
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
|
||||
{
|
||||
struct fib *fib;
|
||||
|
||||
/*
|
||||
* First free any FIBs that have not been consumed.
|
||||
*/
|
||||
while (!list_empty(&fibctx->fib_list)) {
|
||||
struct list_head * entry;
|
||||
/*
|
||||
* Pull the next fib from the fibs
|
||||
*/
|
||||
entry = fibctx->fib_list.next;
|
||||
list_del(entry);
|
||||
fib = list_entry(entry, struct fib, fiblink);
|
||||
fibctx->count--;
|
||||
/*
|
||||
* Free the space occupied by this copy of the fib.
|
||||
*/
|
||||
kfree(fib->hw_fib);
|
||||
kfree(fib);
|
||||
}
|
||||
/*
|
||||
* Remove the Context from the AdapterFibContext List
|
||||
*/
|
||||
list_del(&fibctx->next);
|
||||
/*
|
||||
* Invalidate context
|
||||
*/
|
||||
fibctx->type = 0;
|
||||
/*
|
||||
* Free the space occupied by the Context
|
||||
*/
|
||||
kfree(fibctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 *	close_getadapter_fib	-	close down user fib context
 *	@dev:	adapter
 *	@arg:	ioctl arguments; the pointer value itself carries the 32 bit
 *		context handle (cast, not dereferenced)
 *
 *	This routine will close down the fibctx passed in from the user.
 *	Returns 0 on success or if the context was already gone, -EINVAL
 *	if the handle matches an entry that fails type/size validation.
 */

static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;

	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 *
	 *	NOTE(review): the search runs without dev->fib_lock held; the
	 *	lock is only taken for the actual teardown below.  Confirm the
	 *	list cannot change concurrently on this path.
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while(entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(unsigned long)arg) {
			/* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}
|
||||
|
||||
/**
|
||||
* check_revision - close down user fib context
|
||||
* @dev: adapter
|
||||
* @arg: ioctl arguments
|
||||
*
|
||||
* This routine returns the driver version.
|
||||
* Under Linux, there have been no version incompatibilities, so this is
|
||||
* simple!
|
||||
*/
|
||||
|
||||
static int check_revision(struct aac_dev *dev, void __user *arg)
|
||||
{
|
||||
struct revision response;
|
||||
|
||||
response.compat = 1;
|
||||
response.version = dev->adapter_info.kernelrev;
|
||||
response.build = dev->adapter_info.kernelbuild;
|
||||
|
||||
if (copy_to_user(arg, &response, sizeof(response)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* aac_send_raw_scb
|
||||
*
|
||||
*/
|
||||
|
||||
int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
{
|
||||
struct fib* srbfib;
|
||||
int status;
|
||||
struct aac_srb *srbcmd;
|
||||
struct aac_srb __user *user_srb = arg;
|
||||
struct aac_srb_reply __user *user_reply;
|
||||
struct aac_srb_reply* reply;
|
||||
u32 fibsize = 0;
|
||||
u32 flags = 0;
|
||||
s32 rcode = 0;
|
||||
u32 data_dir;
|
||||
void __user *sg_user[32];
|
||||
void *sg_list[32];
|
||||
u32 sg_indx = 0;
|
||||
u32 byte_count = 0;
|
||||
u32 actual_fibsize = 0;
|
||||
int i;
|
||||
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN)){
|
||||
printk(KERN_DEBUG"aacraid: No permission to send raw srb\n");
|
||||
return -EPERM;
|
||||
}
|
||||
/*
|
||||
* Allocate and initialize a Fib then setup a BlockWrite command
|
||||
*/
|
||||
if (!(srbfib = fib_alloc(dev))) {
|
||||
return -1;
|
||||
}
|
||||
fib_init(srbfib);
|
||||
|
||||
srbcmd = (struct aac_srb*) fib_data(srbfib);
|
||||
|
||||
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
|
||||
printk(KERN_DEBUG"aacraid: Could not copy data size from user\n");
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if (fibsize > FIB_DATA_SIZE_IN_BYTES) {
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if(copy_from_user(srbcmd, user_srb,fibsize)){
|
||||
printk(KERN_DEBUG"aacraid: Could not copy srb from user\n");
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
user_reply = arg+fibsize;
|
||||
|
||||
flags = srbcmd->flags;
|
||||
// Fix up srb for endian and force some values
|
||||
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
|
||||
srbcmd->channel = cpu_to_le32(srbcmd->channel);
|
||||
srbcmd->id = cpu_to_le32(srbcmd->id);
|
||||
srbcmd->lun = cpu_to_le32(srbcmd->lun);
|
||||
srbcmd->flags = cpu_to_le32(srbcmd->flags);
|
||||
srbcmd->timeout = cpu_to_le32(srbcmd->timeout);
|
||||
srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
|
||||
srbcmd->cdb_size = cpu_to_le32(srbcmd->cdb_size);
|
||||
|
||||
switch (srbcmd->flags & (SRB_DataIn | SRB_DataOut)) {
|
||||
case SRB_DataOut:
|
||||
data_dir = DMA_TO_DEVICE;
|
||||
break;
|
||||
case (SRB_DataIn | SRB_DataOut):
|
||||
data_dir = DMA_BIDIRECTIONAL;
|
||||
break;
|
||||
case SRB_DataIn:
|
||||
data_dir = DMA_FROM_DEVICE;
|
||||
break;
|
||||
default:
|
||||
data_dir = DMA_NONE;
|
||||
}
|
||||
if (dev->dac_support == 1) {
|
||||
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
|
||||
byte_count = 0;
|
||||
|
||||
/*
|
||||
* This should also catch if user used the 32 bit sgmap
|
||||
*/
|
||||
actual_fibsize = sizeof(struct aac_srb) -
|
||||
sizeof(struct sgentry) + ((srbcmd->sg.count & 0xff) *
|
||||
sizeof(struct sgentry64));
|
||||
if(actual_fibsize != fibsize){ // User made a mistake - should not continue
|
||||
printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
if ((data_dir == DMA_NONE) && psg->count) {
|
||||
printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n");
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
for (i = 0; i < psg->count; i++) {
|
||||
dma_addr_t addr;
|
||||
u64 le_addr;
|
||||
void* p;
|
||||
p = kmalloc(psg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
psg->sg[i].count,i,psg->count);
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
sg_user[i] = (void __user *)psg->sg[i].addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p,sg_user[i],psg->sg[i].count)){
|
||||
printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n");
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);
|
||||
|
||||
le_addr = cpu_to_le64(addr);
|
||||
psg->sg[i].addr[1] = (u32)(le_addr>>32);
|
||||
psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
|
||||
psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
|
||||
byte_count += psg->sg[i].count;
|
||||
}
|
||||
|
||||
srbcmd->count = cpu_to_le32(byte_count);
|
||||
status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
|
||||
} else {
|
||||
struct sgmap* psg = &srbcmd->sg;
|
||||
byte_count = 0;
|
||||
|
||||
actual_fibsize = sizeof (struct aac_srb) +
|
||||
(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
|
||||
sizeof (struct sgentry));
|
||||
if(actual_fibsize != fibsize){ // User made a mistake - should not continue
|
||||
printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
if ((data_dir == DMA_NONE) && psg->count) {
|
||||
printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n");
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
for (i = 0; i < psg->count; i++) {
|
||||
dma_addr_t addr;
|
||||
void* p;
|
||||
p = kmalloc(psg->sg[i].count,GFP_KERNEL);
|
||||
if(p == 0) {
|
||||
printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
psg->sg[i].count,i,psg->count);
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
sg_user[i] = (void __user *)(psg->sg[i].addr);
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p,sg_user[i],psg->sg[i].count)){
|
||||
printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n");
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);
|
||||
|
||||
psg->sg[i].addr = cpu_to_le32(addr);
|
||||
psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
|
||||
byte_count += psg->sg[i].count;
|
||||
}
|
||||
srbcmd->count = cpu_to_le32(byte_count);
|
||||
status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
|
||||
}
|
||||
|
||||
if (status != 0){
|
||||
printk(KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n");
|
||||
rcode = -1;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if( flags & SRB_DataIn ) {
|
||||
for(i = 0 ; i <= sg_indx; i++){
|
||||
if(copy_to_user(sg_user[i],sg_list[i],le32_to_cpu(srbcmd->sg.sg[i].count))){
|
||||
printk(KERN_DEBUG"aacraid: Could not copy sg data to user\n");
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reply = (struct aac_srb_reply *) fib_data(srbfib);
|
||||
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
|
||||
printk(KERN_DEBUG"aacraid: Could not copy reply to user\n");
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
for(i=0; i <= sg_indx; i++){
|
||||
kfree(sg_list[i]);
|
||||
}
|
||||
fib_complete(srbfib);
|
||||
fib_free(srbfib);
|
||||
|
||||
return rcode;
|
||||
}
|
||||
|
||||
|
||||
/*
 * PCI location reported to user space by aac_get_pci_info(): bus number
 * and device (slot) number of the adapter.
 */
struct aac_pci_info {
	u32 bus;	/* PCI bus number (dev->pdev->bus->number) */
	u32 slot;	/* PCI slot, i.e. PCI_SLOT(devfn) */
};
|
||||
|
||||
|
||||
/*
 * aac_get_pci_info - report the adapter's PCI bus/slot to user space
 * @dev: adapter to report on
 * @arg: user buffer receiving a struct aac_pci_info
 *
 * Returns 0 on success, -EFAULT if the copy to user space fails.
 */
int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
	struct aac_pci_info pci_info = {
		.bus  = dev->pdev->bus->number,
		.slot = PCI_SLOT(dev->pdev->devfn),
	};

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info)) == 0)
		return 0;

	printk(KERN_DEBUG "aacraid: Could not copy pci info\n");
	return -EFAULT;
}
|
||||
|
||||
|
||||
/*
 * aac_do_ioctl - dispatch an aacraid management ioctl
 * @dev: adapter the ioctl is addressed to
 * @cmd: FSACTL_* ioctl command number
 * @arg: user space argument pointer
 *
 * The adapter-specific handler (aac_dev_ioctl) gets first crack; only
 * if it reports -ENOTTY is the command dispatched to the generic
 * handlers below.  Unknown commands return -ENOTTY.
 */
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;

	/*
	 *	HBA gets first crack
	 */
	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		return status;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		return check_revision(dev, arg);
	case FSACTL_SENDFIB:
		return ioctl_send_fib(dev, arg);
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		return open_getadapter_fib(dev, arg);
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		return next_getadapter_fib(dev, arg);
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		return close_getadapter_fib(dev, arg);
	case FSACTL_SEND_RAW_SRB:
		return aac_send_raw_srb(dev, arg);
	case FSACTL_GET_PCI_INFO:
		return aac_get_pci_info(dev, arg);
	default:
		return -ENOTTY;
	}
}
|
||||
|
325
drivers/scsi/aacraid/comminit.c
Normal file
325
drivers/scsi/aacraid/comminit.c
Normal file
@@ -0,0 +1,325 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* comminit.c
|
||||
*
|
||||
 * Abstract: This supports the initialization of the host adapter communication interface.
|
||||
* This is a platform dependent module for the pci cyclone board.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
struct aac_common aac_config;
|
||||
|
||||
/*
 * aac_alloc_comm - carve up one coherent DMA region for adapter/host comms
 * @dev:       adapter being initialised
 * @commaddr:  out: kernel virtual address of the comm area headers/queues
 * @commsize:  size in bytes requested for the comm area
 * @commalign: required alignment of the comm area start
 *
 * Allocates a single pci-consistent block laid out as:
 *   [adapter fibs (4k)][struct aac_init][align pad][comm area][printf buf]
 * and fills in dev->init with the physical addresses the firmware needs.
 *
 * Returns 1 on success, 0 on allocation failure (note: NOT 0/-errno).
 */
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	unsigned long fibsize = 4096;		/* space reserved for adapter fibs */
	unsigned long printfbufsiz = 256;	/* firmware printf buffer */
	struct aac_init *init;
	dma_addr_t phys;

	/* Total request: worst case including alignment padding. */
	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;


	base = pci_alloc_consistent(dev->pdev, size, &phys);

	if(base == NULL)
	{
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}
	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;

	/* The init struct follows directly after the adapter fib space. */
	dev->init = (struct aac_init *)(base + fibsize);
	dev->init_pa = phys + fibsize;

	init = dev->init;

	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
	init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
	init->fsrev = cpu_to_le32(dev->fsrev);

	/*
	 *	Adapter Fibs are the first thing allocated so that they
	 *	start page aligned
	 */
	dev->aif_base_va = (struct hw_fib *)base;

	init->AdapterFibsVirtualAddress = 0;
	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
	init->AdapterFibsSize = cpu_to_le32(fibsize);
	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
	/*
	 *	number of 4k pages of host physical memory. The aacraid fw needs
	 *	this number to be less than 4gb worth of pages. num_physpages is in
	 *	system page units. New firmware doesn't have any issues with the
	 *	mapping system, but older Firmware did, and had *troubles* dealing
	 *	with the math overloading past 32 bits, thus we must limit this
	 *	field.
	 *
	 *	This assumes the memory is mapped zero->n, which isnt
	 *	always true on real computers. It also has some slight problems
	 *	with the GART on x86-64. I've btw never tried DMA from PCI space
	 *	on this platform but don't be suprised if its problematic.
	 */
#ifndef CONFIG_GART_IOMMU
	if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
		init->HostPhysMemPages =
			cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
	} else
#endif
	{
		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
	}


	/*
	 *	Increment the base address by the amount already used
	 */
	base = base + fibsize + sizeof(struct aac_init);
	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
	/*
	 *	Align the beginning of Headers to commalign
	 */
	align = (commalign - ((unsigned long)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;
	/*
	 *	Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	init->CommHeaderAddress = cpu_to_le32((u32)phys);
	/*
	 *	Increment the base address by the size of the CommArea
	 */
	base = base + commsize;
	phys = phys + commsize;
	/*
	 *	Place the Printf buffer area after the Fast I/O comm area.
	 */
	dev->printfbuf = (void *)base;
	init->printfbuf = cpu_to_le32(phys);
	init->printfbufsiz = cpu_to_le32(printfbufsiz);
	memset(base, 0, printfbufsiz);
	return 1;
}
|
||||
|
||||
/*
 * aac_queue_init - initialise one comm queue
 * @dev:   adapter the queue belongs to
 * @q:     queue structure to initialise
 * @mem:   pair of u32 header words in the shared comm area; word 0 is the
 *         producer index, word 1 the consumer index
 * @qsize: number of entries in the queue
 */
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
	q->numpending = 0;
	q->dev = dev;
	INIT_LIST_HEAD(&q->pendingq);
	init_waitqueue_head(&q->cmdready);
	INIT_LIST_HEAD(&q->cmdq);
	init_waitqueue_head(&q->qfull);
	spin_lock_init(&q->lockdata);
	q->lock = &q->lockdata;
	q->headers.producer = mem;
	q->headers.consumer = mem+1;
	/* Both indices start at qsize (little-endian in shared memory). */
	*(q->headers.producer) = cpu_to_le32(qsize);
	*(q->headers.consumer) = cpu_to_le32(qsize);
	q->entries = qsize;
}
|
||||
|
||||
/**
|
||||
* aac_send_shutdown - shutdown an adapter
|
||||
* @dev: Adapter to shutdown
|
||||
*
|
||||
* This routine will send a VM_CloseAll (shutdown) request to the adapter.
|
||||
*/
|
||||
|
||||
int aac_send_shutdown(struct aac_dev * dev)
|
||||
{
|
||||
struct fib * fibctx;
|
||||
struct aac_close *cmd;
|
||||
int status;
|
||||
|
||||
fibctx = fib_alloc(dev);
|
||||
fib_init(fibctx);
|
||||
|
||||
cmd = (struct aac_close *) fib_data(fibctx);
|
||||
|
||||
cmd->command = cpu_to_le32(VM_CloseAll);
|
||||
cmd->cid = cpu_to_le32(0xffffffff);
|
||||
|
||||
status = fib_send(ContainerCommand,
|
||||
fibctx,
|
||||
sizeof(struct aac_close),
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL, NULL);
|
||||
|
||||
if (status == 0)
|
||||
fib_complete(fibctx);
|
||||
fib_free(fibctx);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_comm_init - Initialise FSA data structures
|
||||
* @dev: Adapter to initialise
|
||||
*
|
||||
* Initializes the data structures that are required for the FSA commuication
|
||||
* interface to operate.
|
||||
* Returns
|
||||
* 1 - if we were able to init the commuication interface.
|
||||
* 0 - If there were errors initing. This is a fatal error.
|
||||
*/
|
||||
|
||||
int aac_comm_init(struct aac_dev * dev)
|
||||
{
|
||||
unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
|
||||
unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
|
||||
u32 *headers;
|
||||
struct aac_entry * queues;
|
||||
unsigned long size;
|
||||
struct aac_queue_block * comm = dev->queues;
|
||||
/*
|
||||
* Now allocate and initialize the zone structures used as our
|
||||
* pool of FIB context records. The size of the zone is based
|
||||
* on the system memory size. We also initialize the mutex used
|
||||
* to protect the zone.
|
||||
*/
|
||||
spin_lock_init(&dev->fib_lock);
|
||||
|
||||
/*
|
||||
* Allocate the physically contigous space for the commuication
|
||||
* queue headers.
|
||||
*/
|
||||
|
||||
size = hdrsize + queuesize;
|
||||
|
||||
if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
|
||||
return -ENOMEM;
|
||||
|
||||
queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
|
||||
|
||||
/* Adapter to Host normal priority Command queue */
|
||||
comm->queue[HostNormCmdQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
|
||||
queues += HOST_NORM_CMD_ENTRIES;
|
||||
headers += 2;
|
||||
|
||||
/* Adapter to Host high priority command queue */
|
||||
comm->queue[HostHighCmdQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
|
||||
|
||||
queues += HOST_HIGH_CMD_ENTRIES;
|
||||
headers +=2;
|
||||
|
||||
/* Host to adapter normal priority command queue */
|
||||
comm->queue[AdapNormCmdQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
|
||||
|
||||
queues += ADAP_NORM_CMD_ENTRIES;
|
||||
headers += 2;
|
||||
|
||||
/* host to adapter high priority command queue */
|
||||
comm->queue[AdapHighCmdQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
|
||||
|
||||
queues += ADAP_HIGH_CMD_ENTRIES;
|
||||
headers += 2;
|
||||
|
||||
/* adapter to host normal priority response queue */
|
||||
comm->queue[HostNormRespQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
|
||||
queues += HOST_NORM_RESP_ENTRIES;
|
||||
headers += 2;
|
||||
|
||||
/* adapter to host high priority response queue */
|
||||
comm->queue[HostHighRespQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
|
||||
|
||||
queues += HOST_HIGH_RESP_ENTRIES;
|
||||
headers += 2;
|
||||
|
||||
/* host to adapter normal priority response queue */
|
||||
comm->queue[AdapNormRespQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
|
||||
|
||||
queues += ADAP_NORM_RESP_ENTRIES;
|
||||
headers += 2;
|
||||
|
||||
/* host to adapter high priority response queue */
|
||||
comm->queue[AdapHighRespQueue].base = queues;
|
||||
aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
|
||||
|
||||
comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
|
||||
comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
|
||||
comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
|
||||
comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
|
||||
{
|
||||
/*
|
||||
* Ok now init the communication subsystem
|
||||
*/
|
||||
|
||||
dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
|
||||
if (dev->queues == NULL) {
|
||||
printk(KERN_ERR "Error could not allocate comm region.\n");
|
||||
return NULL;
|
||||
}
|
||||
memset(dev->queues, 0, sizeof(struct aac_queue_block));
|
||||
|
||||
if (aac_comm_init(dev)<0){
|
||||
kfree(dev->queues);
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Initialize the list of fibs
|
||||
*/
|
||||
if(fib_setup(dev)<0){
|
||||
kfree(dev->queues);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&dev->fib_list);
|
||||
init_completion(&dev->aif_completion);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
|
939
drivers/scsi/aacraid/commsup.c
Normal file
939
drivers/scsi/aacraid/commsup.c
Normal file
@@ -0,0 +1,939 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* commsup.c
|
||||
*
|
||||
* Abstract: Contain all routines that are required for FSA host/adapter
|
||||
* commuication.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
/**
|
||||
* fib_map_alloc - allocate the fib objects
|
||||
* @dev: Adapter to allocate for
|
||||
*
|
||||
* Allocate and map the shared PCI space for the FIB blocks used to
|
||||
* talk to the Adaptec firmware.
|
||||
*/
|
||||
|
||||
static int fib_map_alloc(struct aac_dev *dev)
|
||||
{
|
||||
if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fib_map_free - free the fib objects
|
||||
* @dev: Adapter to free
|
||||
*
|
||||
* Free the PCI mappings and the memory allocated for FIB blocks
|
||||
* on this adapter.
|
||||
*/
|
||||
|
||||
void fib_map_free(struct aac_dev *dev)
|
||||
{
|
||||
pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
|
||||
}
|
||||
|
||||
/**
|
||||
* fib_setup - setup the fibs
|
||||
* @dev: Adapter to set up
|
||||
*
|
||||
* Allocate the PCI space for the fibs, map it and then intialise the
|
||||
* fib area, the unmapped fib data and also the free list
|
||||
*/
|
||||
|
||||
int fib_setup(struct aac_dev * dev)
|
||||
{
|
||||
struct fib *fibptr;
|
||||
struct hw_fib *hw_fib_va;
|
||||
dma_addr_t hw_fib_pa;
|
||||
int i;
|
||||
|
||||
if(fib_map_alloc(dev)<0)
|
||||
return -ENOMEM;
|
||||
|
||||
hw_fib_va = dev->hw_fib_va;
|
||||
hw_fib_pa = dev->hw_fib_pa;
|
||||
memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
|
||||
/*
|
||||
* Initialise the fibs
|
||||
*/
|
||||
for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
|
||||
{
|
||||
fibptr->dev = dev;
|
||||
fibptr->hw_fib = hw_fib_va;
|
||||
fibptr->data = (void *) fibptr->hw_fib->data;
|
||||
fibptr->next = fibptr+1; /* Forward chain the fibs */
|
||||
init_MUTEX_LOCKED(&fibptr->event_wait);
|
||||
spin_lock_init(&fibptr->event_lock);
|
||||
hw_fib_va->header.XferState = 0xffffffff;
|
||||
hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
|
||||
fibptr->hw_fib_pa = hw_fib_pa;
|
||||
hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
|
||||
hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib);
|
||||
}
|
||||
/*
|
||||
* Add the fib chain to the free list
|
||||
*/
|
||||
dev->fibs[AAC_NUM_FIB-1].next = NULL;
|
||||
/*
|
||||
* Enable this to debug out of queue space
|
||||
*/
|
||||
dev->free_fib = &dev->fibs[0];
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fib_alloc - allocate a fib
|
||||
* @dev: Adapter to allocate the fib for
|
||||
*
|
||||
* Allocate a fib from the adapter fib pool. If the pool is empty we
|
||||
* wait for fibs to become free.
|
||||
*/
|
||||
|
||||
struct fib * fib_alloc(struct aac_dev *dev)
|
||||
{
|
||||
struct fib * fibptr;
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&dev->fib_lock, flags);
|
||||
fibptr = dev->free_fib;
|
||||
/* Cannot sleep here or you get hangs. Instead we did the
|
||||
maths at compile time. */
|
||||
if(!fibptr)
|
||||
BUG();
|
||||
dev->free_fib = fibptr->next;
|
||||
spin_unlock_irqrestore(&dev->fib_lock, flags);
|
||||
/*
|
||||
* Set the proper node type code and node byte size
|
||||
*/
|
||||
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
|
||||
fibptr->size = sizeof(struct fib);
|
||||
/*
|
||||
* Null out fields that depend on being zero at the start of
|
||||
* each I/O
|
||||
*/
|
||||
fibptr->hw_fib->header.XferState = 0;
|
||||
fibptr->callback = NULL;
|
||||
fibptr->callback_data = NULL;
|
||||
|
||||
return fibptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* fib_free - free a fib
|
||||
* @fibptr: fib to free up
|
||||
*
|
||||
* Frees up a fib and places it on the appropriate queue
|
||||
* (either free or timed out)
|
||||
*/
|
||||
|
||||
void fib_free(struct fib * fibptr)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
|
||||
if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
|
||||
aac_config.fib_timeouts++;
|
||||
fibptr->next = fibptr->dev->timeout_fib;
|
||||
fibptr->dev->timeout_fib = fibptr;
|
||||
} else {
|
||||
if (fibptr->hw_fib->header.XferState != 0) {
|
||||
printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
|
||||
(void*)fibptr,
|
||||
le32_to_cpu(fibptr->hw_fib->header.XferState));
|
||||
}
|
||||
fibptr->next = fibptr->dev->free_fib;
|
||||
fibptr->dev->free_fib = fibptr;
|
||||
}
|
||||
spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* fib_init - initialise a fib
|
||||
* @fibptr: The fib to initialize
|
||||
*
|
||||
* Set up the generic fib fields ready for use
|
||||
*/
|
||||
|
||||
void fib_init(struct fib *fibptr)
|
||||
{
|
||||
struct hw_fib *hw_fib = fibptr->hw_fib;
|
||||
|
||||
hw_fib->header.StructType = FIB_MAGIC;
|
||||
hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
|
||||
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
|
||||
hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
|
||||
hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
|
||||
hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
|
||||
}
|
||||
|
||||
/**
|
||||
* fib_deallocate - deallocate a fib
|
||||
* @fibptr: fib to deallocate
|
||||
*
|
||||
* Will deallocate and return to the free pool the FIB pointed to by the
|
||||
* caller.
|
||||
*/
|
||||
|
||||
void fib_dealloc(struct fib * fibptr)
|
||||
{
|
||||
struct hw_fib *hw_fib = fibptr->hw_fib;
|
||||
if(hw_fib->header.StructType != FIB_MAGIC)
|
||||
BUG();
|
||||
hw_fib->header.XferState = 0;
|
||||
}
|
||||
|
||||
/*
 * Communication primitives define and support the queuing method we use to
 * support host to adapter communication. All queue accesses happen through
 * these routines and they are the only routines which have knowledge of
 * how these queues are implemented.
 */
|
||||
|
||||
/**
|
||||
* aac_get_entry - get a queue entry
|
||||
* @dev: Adapter
|
||||
* @qid: Queue Number
|
||||
* @entry: Entry return
|
||||
* @index: Index return
|
||||
* @nonotify: notification control
|
||||
*
|
||||
* With a priority the routine returns a queue entry if the queue has free entries. If the queue
|
||||
* is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
|
||||
* returned.
|
||||
*/
|
||||
|
||||
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
|
||||
{
|
||||
struct aac_queue * q;
|
||||
|
||||
/*
|
||||
* All of the queues wrap when they reach the end, so we check
|
||||
* to see if they have reached the end and if they have we just
|
||||
* set the index back to zero. This is a wrap. You could or off
|
||||
* the high bits in all updates but this is a bit faster I think.
|
||||
*/
|
||||
|
||||
q = &dev->queues->queue[qid];
|
||||
|
||||
*index = le32_to_cpu(*(q->headers.producer));
|
||||
if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
|
||||
*nonotify = 1;
|
||||
|
||||
if (qid == AdapHighCmdQueue) {
|
||||
if (*index >= ADAP_HIGH_CMD_ENTRIES)
|
||||
*index = 0;
|
||||
} else if (qid == AdapNormCmdQueue) {
|
||||
if (*index >= ADAP_NORM_CMD_ENTRIES)
|
||||
*index = 0; /* Wrap to front of the Producer Queue. */
|
||||
}
|
||||
else if (qid == AdapHighRespQueue)
|
||||
{
|
||||
if (*index >= ADAP_HIGH_RESP_ENTRIES)
|
||||
*index = 0;
|
||||
}
|
||||
else if (qid == AdapNormRespQueue)
|
||||
{
|
||||
if (*index >= ADAP_NORM_RESP_ENTRIES)
|
||||
*index = 0; /* Wrap to front of the Producer Queue. */
|
||||
}
|
||||
else {
|
||||
printk("aacraid: invalid qid\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
|
||||
printk(KERN_WARNING "Queue %d full, %d outstanding.\n",
|
||||
qid, q->numpending);
|
||||
return 0;
|
||||
} else {
|
||||
*entry = q->base + *index;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priorty adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 *
 *	LOCKING: takes q->lock and deliberately returns WITH the lock
 *	still held; the matching unlock is performed by aac_insert_entry()
 *	once the entry has been published to the adapter.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;
	struct aac_queue * q = &dev->queues->queue[qid];

	/* Held across the return -- see LOCKING note above */
	spin_lock_irqsave(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
	{
		/* if no entries wait for some if caller wants to */
		/* NOTE(review): this busy-loops (with a printk) rather than
		 * sleeping until an entry frees up -- confirm intended */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	}
	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
	{
		/* Silent busy-wait on the response queues */
		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter now where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true than we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
|
||||
|
||||
|
||||
/**
|
||||
* aac_insert_entry - insert a queue entry
|
||||
* @dev: Adapter
|
||||
* @index: Index of entry to insert
|
||||
* @qid: Queue number
|
||||
* @nonotify: Suppress adapter notification
|
||||
*
|
||||
* Gets the next free QE off the requested priorty adapter command
|
||||
* queue and associates the Fib with the QE. The QE represented by
|
||||
* index is ready to insert on the queue when this routine returns
|
||||
* success.
|
||||
*/
|
||||
|
||||
static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
|
||||
{
|
||||
struct aac_queue * q = &dev->queues->queue[qid];
|
||||
|
||||
if(q == NULL)
|
||||
BUG();
|
||||
*(q->headers.producer) = cpu_to_le32(index + 1);
|
||||
spin_unlock_irqrestore(q->lock, q->SavedIrql);
|
||||
|
||||
if (qid == AdapHighCmdQueue ||
|
||||
qid == AdapNormCmdQueue ||
|
||||
qid == AdapHighRespQueue ||
|
||||
qid == AdapNormRespQueue)
|
||||
{
|
||||
if (!nonotify)
|
||||
aac_adapter_notify(dev, qid);
|
||||
}
|
||||
else
|
||||
printk("Suprise insert!\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs. This level has no knowledge of how these FIBs
 * get passed back and forth.
 */
|
||||
|
||||
/**
 *	fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response than
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
	u32 index;
	u32 qid;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;

	/* Only a host-owned fib may be sent */
	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and reponse requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requesed the Fib will just be deallocaed by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry connect the FIB to it and send an notify
	 *	the adapter a command is ready.
	 */
	if (priority == FsaHigh) {
		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
		qid = AdapHighCmdQueue;
	} else {
		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
		qid = AdapNormCmdQueue;
	}
	q = &dev->queues->queue[qid];

	/* Taken before queueing so a fast response cannot complete us
	 * before we start waiting; released just before the down() below */
	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	/* NOTE(review): this error return leaves event_lock held when
	 * wait is set (and aac_queue_get's q->lock on its error path) --
	 * looks like a lock leak; confirm against later kernel fixes */
	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
		return -EWOULDBLOCK;
	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}
	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
	/* Track the fib so the interrupt handler can find it */
	list_add_tail(&fibptr->queue, &q->pendingq);
	q->numpending++;

	fibptr->done = 0;
	fibptr->flags = 0;

	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
		return -EWOULDBLOCK;
	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Sleep until the DPC completes the fib and ups the semaphore */
		down(&fibptr->event_wait);
		if(fibptr->done == 0)
			BUG();

		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response than return success otherwise
	 *	return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
|
||||
|
||||
/**
|
||||
* aac_consumer_get - get the top of the queue
|
||||
* @dev: Adapter
|
||||
* @q: Queue
|
||||
* @entry: Return entry
|
||||
*
|
||||
* Will return a pointer to the entry on the top of the queue requested that
|
||||
* we are a consumer of, and return the address of the queue entry. It does
|
||||
* not change the state of the queue.
|
||||
*/
|
||||
|
||||
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
|
||||
{
|
||||
u32 index;
|
||||
int status;
|
||||
if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
|
||||
status = 0;
|
||||
} else {
|
||||
/*
|
||||
* The consumer index must be wrapped if we have reached
|
||||
* the end of the queue, else we just use the entry
|
||||
* pointed to by the header index
|
||||
*/
|
||||
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
|
||||
index = 0;
|
||||
else
|
||||
index = le32_to_cpu(*q->headers.consumer);
|
||||
*entry = q->base + index;
|
||||
status = 1;
|
||||
}
|
||||
return(status);
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_consumer_free - free consumer entry
|
||||
* @dev: Adapter
|
||||
* @q: Queue
|
||||
* @qid: Queue ident
|
||||
*
|
||||
* Frees up the current top of the queue we are a consumer of. If the
|
||||
* queue was full notify the producer that the queue is no longer full.
|
||||
*/
|
||||
|
||||
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
|
||||
{
|
||||
int wasfull = 0;
|
||||
u32 notify;
|
||||
|
||||
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
|
||||
wasfull = 1;
|
||||
|
||||
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
|
||||
*q->headers.consumer = cpu_to_le32(1);
|
||||
else
|
||||
*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
|
||||
|
||||
if (wasfull) {
|
||||
switch (qid) {
|
||||
|
||||
case HostNormCmdQueue:
|
||||
notify = HostNormCmdNotFull;
|
||||
break;
|
||||
case HostHighCmdQueue:
|
||||
notify = HostHighCmdNotFull;
|
||||
break;
|
||||
case HostNormRespQueue:
|
||||
notify = HostNormRespNotFull;
|
||||
break;
|
||||
case HostHighRespQueue:
|
||||
notify = HostHighRespNotFull;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
return;
|
||||
}
|
||||
aac_adapter_notify(dev, notify);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 *	fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	/* A zero XferState means the fib was already completed */
	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if ( hw_fib->header.StructType != FIB_MAGIC ) {
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
		if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
			u32 index;
			/* A non-zero size means the response carries data */
			if (size)
			{
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
				return -EWOULDBLOCK;
			}
			/* aac_insert_entry() always returns 0; result ignored */
			if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
			}
		}
		/* NOTE(review): unlike the HighPriority test above, this mask
		 * is not wrapped in cpu_to_le32() -- on big-endian hardware the
		 * two tests disagree; confirm against later kernel fixes */
		else if (hw_fib->header.XferState & NormalPriority)
		{
			u32 index;

			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
				return -EWOULDBLOCK;
			/* aac_insert_entry() always returns 0; result ignored */
			if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
			{
			}
		}
	}
	else
	{
		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
|
||||
|
||||
/**
|
||||
* fib_complete - fib completion handler
|
||||
* @fib: FIB to complete
|
||||
*
|
||||
* Will do all necessary work to complete a FIB.
|
||||
*/
|
||||
|
||||
int fib_complete(struct fib * fibptr)
|
||||
{
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib;
|
||||
|
||||
/*
|
||||
* Check for a fib which has already been completed
|
||||
*/
|
||||
|
||||
if (hw_fib->header.XferState == 0)
|
||||
return 0;
|
||||
/*
|
||||
* If we plan to do anything check the structure type first.
|
||||
*/
|
||||
|
||||
if (hw_fib->header.StructType != FIB_MAGIC)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* This block completes a cdb which orginated on the host and we
|
||||
* just need to deallocate the cdb or reinit it. At this point the
|
||||
* command is complete that we had sent to the adapter and this
|
||||
* cdb could be reused.
|
||||
*/
|
||||
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
|
||||
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
|
||||
{
|
||||
fib_dealloc(fibptr);
|
||||
}
|
||||
else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
|
||||
{
|
||||
/*
|
||||
* This handles the case when the host has aborted the I/O
|
||||
* to the adapter because the adapter is not responding
|
||||
*/
|
||||
fib_dealloc(fibptr);
|
||||
} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
|
||||
fib_dealloc(fibptr);
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_printf - handle printf from firmware
|
||||
* @dev: Adapter
|
||||
* @val: Message info
|
||||
*
|
||||
* Print a message passed to us by the controller firmware on the
|
||||
* Adaptec board
|
||||
*/
|
||||
|
||||
void aac_printf(struct aac_dev *dev, u32 val)
|
||||
{
|
||||
int length = val & 0xffff;
|
||||
int level = (val >> 16) & 0xffff;
|
||||
char *cp = dev->printfbuf;
|
||||
|
||||
/*
|
||||
* The size of the printfbuf is set in port.c
|
||||
* There is no variable or define for it
|
||||
*/
|
||||
if (length > 255)
|
||||
length = 255;
|
||||
if (cp[length] != 0)
|
||||
cp[length] = 0;
|
||||
if (level == LOG_AAC_HIGH_ERROR)
|
||||
printk(KERN_WARNING "aacraid:%s", cp);
|
||||
else
|
||||
printk(KERN_INFO "aacraid:%s", cp);
|
||||
memset(cp, 0, 256);
|
||||
}
|
||||
|
||||
/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 *
 *	Returns -EINVAL if an AIF thread already exists for this adapter.
 *	Otherwise the thread runs until it receives SIGKILL and then exits
 *	through complete_and_exit(), so the call never returns normally.
 */

int aac_command_thread(struct aac_dev * dev)
{
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_queue_block *queues = dev->queues;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;
	/*
	 *	Set up the name that will appear in 'ps'
	 *	stored in task_struct.comm[16].
	 */
	daemonize("aacraid");
	allow_signal(SIGKILL);
	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while(1)
	{
		/* Drain every queued AIF fib; the queue is protected by the
		 * HostNormCmdQueue lock, which is dropped while each fib is
		 * processed and retaken before the next list operation. */
		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We Really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			/* Re-initialise the fib wrapper around the existing
			 * hardware fib before using it. */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof( struct fib );
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;

				/* Seconds, not jiffies (see comment below). */
				time_now = jiffies/HZ;

				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							/* Advance before the context is
							 * torn down so the iteration
							 * stays valid. */
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
					if (newfib && hw_newfib) {
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that will be waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						/* Allocation failed: drop this event for
						 * this context and release whichever half
						 * was allocated. */
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
						if(newfib)
							kfree(newfib);
						if(hw_newfib)
							kfree(hw_newfib);
					}
					entry = entry->next;
				}
				/*
				 * Set the status of this FIB
				 */
				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
			}
			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
			/* Free only the fib wrapper (allocated in
			 * aac_command_normal); the hw_fib it points to lives in
			 * the adapter's AIF area and is not ours to free. */
			kfree(fib);
		}
		/*
		 * There are no more AIF's
		 */
		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if(signal_pending(current))
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	complete_and_exit(&dev->aif_completion, 0);
}
|
215
drivers/scsi/aacraid/dpcsup.c
Normal file
215
drivers/scsi/aacraid/dpcsup.c
Normal file
@@ -0,0 +1,215 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* dpcsup.c
|
||||
*
|
||||
* Abstract: All DPC processing routines for the cyclone board occur here.
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting. We will
 *	take a spinlock out on the queue before operating on it.
 *
 *	Always returns 0.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		int fast;
		/* The queue entry encodes the fib index in the upper bits;
		 * bit 0 flags a "fast" (pre-completed) response. */
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 1];
		hwfib = fib->hw_fib;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			list_del(&fib->queue);
			dev->queues->queue[AdapNormCmdQueue].numpending--;
		} else {
			/* Timed-out fib: the queue lock is still held here,
			 * matching the state expected at the loop top. */
			printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
			printk(KERN_DEBUG"aacraid: hwfib=%p fib index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
			continue;
		}
		/* Drop the lock while completing the fib - the callback or
		 * waiter wakeup below must not run under the queue lock. */
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 *	Doctor the fib
			 */
			*(u32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			/* Squash any extended status into plain ST_OK. */
			u32 *pstatus = (u32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			/* Synchronous caller: mark done and wake it up under
			 * the fib's event lock. */
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			fib->done = 1;
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	/* Bookkeeping for the driver's load statistics. */
	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
|
||||
|
||||
|
||||
/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all QE there are and wake up all the waiters before exiting.
 *	We will take a spinlock out on the queue before operating on it.
 *
 *	Always returns 0.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		/* The entry address is a byte offset into the AIF area;
		 * convert to an index into aif_base_va. */
		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists.
		 *	GFP_ATOMIC because we are under the queue spinlock;
		 *	on failure we fall back to the stack fib and complete
		 *	inline below.
		 */
		if (dev->aif_thread)
			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;


		if (dev->aif_thread && fib != &fibctx) {
			/* Heap fib: hand it to aac_command_thread via the
			 * command queue (it will kfree the wrapper). */
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			/* No AIF thread (or no memory): complete the fib
			 * right here, dropping the lock around the
			 * adapter-completion call. */
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
			fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
|
749
drivers/scsi/aacraid/linit.c
Normal file
749
drivers/scsi/aacraid/linit.c
Normal file
@@ -0,0 +1,749 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* linit.c
|
||||
*
|
||||
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller
|
||||
*/
|
||||
|
||||
#define AAC_DRIVER_VERSION "1.1.2-lk2"
|
||||
#define AAC_DRIVER_BUILD_DATE __DATE__
|
||||
#define AAC_DRIVERNAME "aacraid"
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/ioctl32.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_tcq.h>
|
||||
#include <scsi/scsicam.h>
|
||||
#include <scsi/scsi_eh.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
|
||||
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
		   "Adaptec Advanced Raid Products, "
		   "and HP NetRAID-4M SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_VERSION);

/* All known adapters, linked via aac_dev.entry; searched by minor
 * number in aac_cfg_open(). */
static LIST_HEAD(aac_devices);
/* Char-device major for the config device; -1 means not yet assigned
 * (presumably registered during module init - confirm against probe code). */
static int aac_cfg_major = -1;
|
||||
|
||||
/*
 * Because of the way Linux names scsi devices, the order in this table has
 * become important.  Check for on-board Raid first, add-in cards second.
 *
 * Note: The last field is used to index into aac_drivers below.
 *
 * Entry layout follows struct pci_device_id:
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data }
 */
static struct pci_device_id aac_pci_tbl[] = {
	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */

	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 25 }, /* Callisto Jupiter Platform */
	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 26 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 27 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 28 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 29 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 30 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 31 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 32 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 33 }, /* AAR-2610SA PCI SATA 6ch */
	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 34 }, /* ASR-2240S (SabreExpress) */
	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 35 }, /* ASR-4005SAS */
	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 36 }, /* IBM 8i (AvonPark) */
	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 37 }, /* ASR-4000SAS (BlackBird) */
	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 38 }, /* ASR-4800SAS (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x029A, 0, 0, 39 }, /* ASR-4805SAS (Marauder-E) */

	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 40 }, /* Perc 320/DC*/
	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 41 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 42 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 43 }, /* Dell PERC2/QC */
	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 44 }, /* HP NetRAID-4M */

	/* Catch-all entries must stay last so exact matches win. */
	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 45 }, /* Dell Catchall */
	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 46 }, /* Legend Catchall */
	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 47 }, /* Adaptec Catch All */
	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 48 }, /* Adaptec Rocket Catch All */
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
|
||||
|
||||
/*
 * dmb - For now we add the number of channels to this structure.
 * In the future we should add a fib that reports the number of channels
 * for the card.  At that time we can remove the channels from here
 *
 * Fields appear to be: init routine, driver name, vendor string,
 * model string, channel count, quirk flags (optional) - confirm
 * against struct aac_driver_ident in aacraid.h.
 */
static struct aac_driver_ident aac_drivers[] = {
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */

	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */
	{ aac_rx_init, "aacraid", "IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */

	{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */
	{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */

	/* Indices 45-48 back the PCI_ANY_ID catch-all entries in aac_pci_tbl. */
	{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */
};
|
||||
|
||||
/**
|
||||
* aac_queuecommand - queue a SCSI command
|
||||
* @cmd: SCSI command to queue
|
||||
* @done: Function to call on command completion
|
||||
*
|
||||
* Queues a command for execution by the associated Host Adapter.
|
||||
*
|
||||
* TODO: unify with aac_scsi_cmd().
|
||||
*/
|
||||
|
||||
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
cmd->scsi_done = done;
|
||||
return (aac_scsi_cmd(cmd) ? FAILED : 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_info - Returns the host adapter name
|
||||
* @shost: Scsi host to report on
|
||||
*
|
||||
* Returns a static string describing the device in question
|
||||
*/
|
||||
|
||||
const char *aac_info(struct Scsi_Host *shost)
|
||||
{
|
||||
struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
|
||||
return aac_drivers[dev->cardtype].name;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_get_driver_ident
|
||||
* @devtype: index into lookup table
|
||||
*
|
||||
* Returns a pointer to the entry in the driver lookup table.
|
||||
*/
|
||||
|
||||
struct aac_driver_ident* aac_get_driver_ident(int devtype)
|
||||
{
|
||||
return &aac_drivers[devtype];
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_biosparm - return BIOS parameters for disk
|
||||
* @sdev: The scsi device corresponding to the disk
|
||||
* @bdev: the block device corresponding to the disk
|
||||
* @capacity: the sector capacity of the disk
|
||||
* @geom: geometry block to fill in
|
||||
*
|
||||
* Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
|
||||
* The default disk geometry is 64 heads, 32 sectors, and the appropriate
|
||||
* number of cylinders so as not to exceed drive capacity. In order for
|
||||
* disks equal to or larger than 1 GB to be addressable by the BIOS
|
||||
* without exceeding the BIOS limitation of 1024 cylinders, Extended
|
||||
* Translation should be enabled. With Extended Translation enabled,
|
||||
* drives between 1 GB inclusive and 2 GB exclusive are given a disk
|
||||
* geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
|
||||
* are given a disk geometry of 255 heads and 63 sectors. However, if
|
||||
* the BIOS detects that the Extended Translation setting does not match
|
||||
* the geometry in the partition table, then the translation inferred
|
||||
* from the partition table will be used by the BIOS, and a warning may
|
||||
* be displayed.
|
||||
*/
|
||||
|
||||
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
|
||||
sector_t capacity, int *geom)
|
||||
{
|
||||
struct diskparm *param = (struct diskparm *)geom;
|
||||
unsigned char *buf;
|
||||
|
||||
dprintk((KERN_DEBUG "aac_biosparm.\n"));
|
||||
|
||||
/*
|
||||
* Assuming extended translation is enabled - #REVISIT#
|
||||
*/
|
||||
if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
|
||||
if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
|
||||
param->heads = 255;
|
||||
param->sectors = 63;
|
||||
} else {
|
||||
param->heads = 128;
|
||||
param->sectors = 32;
|
||||
}
|
||||
} else {
|
||||
param->heads = 64;
|
||||
param->sectors = 32;
|
||||
}
|
||||
|
||||
param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
|
||||
|
||||
/*
|
||||
* Read the first 1024 bytes from the disk device, if the boot
|
||||
* sector partition table is valid, search for a partition table
|
||||
* entry whose end_head matches one of the standard geometry
|
||||
* translations ( 64/32, 128/32, 255/63 ).
|
||||
*/
|
||||
buf = scsi_bios_ptable(bdev);
|
||||
if(*(unsigned short *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
|
||||
struct partition *first = (struct partition * )buf;
|
||||
struct partition *entry = first;
|
||||
int saved_cylinders = param->cylinders;
|
||||
int num;
|
||||
unsigned char end_head, end_sec;
|
||||
|
||||
for(num = 0; num < 4; num++) {
|
||||
end_head = entry->end_head;
|
||||
end_sec = entry->end_sector & 0x3f;
|
||||
|
||||
if(end_head == 63) {
|
||||
param->heads = 64;
|
||||
param->sectors = 32;
|
||||
break;
|
||||
} else if(end_head == 127) {
|
||||
param->heads = 128;
|
||||
param->sectors = 32;
|
||||
break;
|
||||
} else if(end_head == 254) {
|
||||
param->heads = 255;
|
||||
param->sectors = 63;
|
||||
break;
|
||||
}
|
||||
entry++;
|
||||
}
|
||||
|
||||
if (num == 4) {
|
||||
end_head = first->end_head;
|
||||
end_sec = first->end_sector & 0x3f;
|
||||
}
|
||||
|
||||
param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
|
||||
if (num < 4 && end_sec == param->sectors) {
|
||||
if (param->cylinders != saved_cylinders)
|
||||
dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
|
||||
param->heads, param->sectors, num));
|
||||
} else if (end_head > 0 || end_sec > 0) {
|
||||
dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
|
||||
end_head + 1, end_sec, num));
|
||||
dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
|
||||
param->heads, param->sectors));
|
||||
}
|
||||
}
|
||||
kfree(buf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_slave_configure - compute queue depths
|
||||
* @sdev: SCSI device we are considering
|
||||
*
|
||||
* Selects queue depths for each target device based on the host adapter's
|
||||
* total capacity and the queue depth supported by the target device.
|
||||
* A queue depth of one automatically disables tagged queueing.
|
||||
*/
|
||||
|
||||
static int aac_slave_configure(struct scsi_device *sdev)
|
||||
{
|
||||
if (sdev->tagged_supported)
|
||||
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128);
|
||||
else
|
||||
scsi_adjust_queue_depth(sdev, 0, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 *	aac_ioctl	-	SCSI-device ioctl entry point
 *	Resolves the owning adapter from the host and forwards the
 *	request to the common aac_do_ioctl() handler.
 */
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;

	return aac_do_ioctl(aac, cmd, arg);
}
|
||||
|
||||
/*
 * XXX: does aac really need no error handling??
 */
/* Deliberate stub: per-command abort is not supported by this driver,
 * so always report FAILED and let the midlayer escalate to reset. */
static int aac_eh_abort(struct scsi_cmnd *cmd)
{
	return FAILED;
}
|
||||
|
||||
/*
 *	aac_eh_reset	-	Reset command handling
 *	@cmd: SCSI command block causing the reset
 *
 *	Checks adapter health, then waits up to 60 seconds for all
 *	outstanding commands on the host to drain before declaring
 *	success.
 *
 *	NOTE(review): returns -ENODEV / -ETIMEDOUT on failure rather
 *	than the SCSI EH SUCCESS/FAILED codes - confirm how the
 *	midlayer of this kernel version interprets these values.
 */
static int aac_eh_reset(struct scsi_cmnd* cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct scsi_cmnd * command;
	int count;
	struct aac_dev * aac;
	unsigned long flags;

	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
					AAC_DRIVERNAME);


	aac = (struct aac_dev *)host->hostdata;
	if (aac_adapter_check_health(aac)) {
		printk(KERN_ERR "%s: Host adapter appears dead\n",
				AAC_DRIVERNAME);
		return -ENODEV;
	}
	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). One pass per second;
	 * 'dev' is reused here as the per-device iterator.
	 */
	for (count = 60; count; --count) {
		int active = 0;
		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flags);
			/* A nonzero serial_number marks a command still
			 * in flight; one is enough to keep waiting. */
			list_for_each_entry(command, &dev->cmd_list, list) {
				if (command->serial_number) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flags);
			if (active)
				break;

		}
		/*
		 * We can exit If all the commands are complete
		 */
		if (active == 0)
			return SUCCESS;
		/* Drop the host lock (held in EH context) while sleeping. */
		spin_unlock_irq(host->host_lock);
		ssleep(1);
		spin_lock_irq(host->host_lock);
	}
	printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
	return -ETIMEDOUT;
}
|
||||
|
||||
/**
|
||||
* aac_cfg_open - open a configuration file
|
||||
* @inode: inode being opened
|
||||
* @file: file handle attached
|
||||
*
|
||||
* Called when the configuration device is opened. Does the needed
|
||||
* set up on the handle and then returns
|
||||
*
|
||||
* Bugs: This needs extending to check a given adapter is present
|
||||
* so we can support hot plugging, and to ref count adapters.
|
||||
*/
|
||||
|
||||
static int aac_cfg_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct aac_dev *aac;
|
||||
unsigned minor = iminor(inode);
|
||||
int err = -ENODEV;
|
||||
|
||||
list_for_each_entry(aac, &aac_devices, entry) {
|
||||
if (aac->id == minor) {
|
||||
file->private_data = aac;
|
||||
err = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_cfg_ioctl - AAC configuration request
|
||||
* @inode: inode of device
|
||||
* @file: file handle
|
||||
* @cmd: ioctl command code
|
||||
* @arg: argument
|
||||
*
|
||||
* Handles a configuration ioctl. Currently this involves wrapping it
|
||||
* up and feeding it into the nasty windowsalike glue layer.
|
||||
*
|
||||
* Bugs: Needs locking against parallel ioctls lower down
|
||||
* Bugs: Needs to handle hot plugging
|
||||
*/
|
||||
|
||||
static int aac_cfg_ioctl(struct inode *inode, struct file *file,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
|
||||
{
|
||||
long ret;
|
||||
lock_kernel();
|
||||
switch (cmd) {
|
||||
case FSACTL_MINIPORT_REV_CHECK:
|
||||
case FSACTL_SENDFIB:
|
||||
case FSACTL_OPEN_GET_ADAPTER_FIB:
|
||||
case FSACTL_CLOSE_GET_ADAPTER_FIB:
|
||||
case FSACTL_SEND_RAW_SRB:
|
||||
case FSACTL_GET_PCI_INFO:
|
||||
case FSACTL_QUERY_DISK:
|
||||
case FSACTL_DELETE_DISK:
|
||||
case FSACTL_FORCE_DELETE_DISK:
|
||||
case FSACTL_GET_CONTAINERS:
|
||||
ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
|
||||
break;
|
||||
|
||||
case FSACTL_GET_NEXT_ADAPTER_FIB: {
|
||||
struct fib_ioctl __user *f;
|
||||
|
||||
f = compat_alloc_user_space(sizeof(*f));
|
||||
ret = 0;
|
||||
if (clear_user(f, sizeof(*f) != sizeof(*f)))
|
||||
ret = -EFAULT;
|
||||
if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
|
||||
ret = -EFAULT;
|
||||
if (!ret)
|
||||
ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
ret = -ENOIOCTLCMD;
|
||||
break;
|
||||
}
|
||||
unlock_kernel();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * 32-bit ioctl entry for a scsi_device: recover the adapter and route
 * the request through the common compat translator.
 */
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;

	return aac_compat_do_ioctl(aac, cmd, (unsigned long)arg);
}
|
||||
|
||||
static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
|
||||
{
|
||||
return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * File operations for the "aac" configuration character device
 * registered in aac_init().
 */
static struct file_operations aac_cfg_fops = {
	.owner = THIS_MODULE,
	.ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = aac_compat_cfg_ioctl,
#endif
	.open = aac_cfg_open,
};
|
||||
|
||||
/*
 * SCSI host template for aacraid adapters: wires the midlayer entry
 * points to the handlers above and sets fixed queue and scatter-gather
 * limits.
 */
static struct scsi_host_template aac_driver_template = {
	.module = THIS_MODULE,
	.name = "AAC",
	.proc_name = "aacraid",
	.info = aac_info,
	.ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = aac_compat_ioctl,
#endif
	.queuecommand = aac_queuecommand,
	.bios_param = aac_biosparm,
	.slave_configure = aac_slave_configure,
	.eh_abort_handler = aac_eh_abort,
	.eh_host_reset_handler = aac_eh_reset,
	.can_queue = AAC_NUM_IO_FIB,
	.this_id = 16,
	.sg_tablesize = 16,
	.max_sectors = 128,
	/* cmd_per_lun is capped at 256 regardless of FIB count. */
#if (AAC_NUM_IO_FIB > 256)
	.cmd_per_lun = 256,
#else
	.cmd_per_lun = AAC_NUM_IO_FIB,
#endif
	.use_clustering = ENABLE_CLUSTERING,
};
|
||||
|
||||
|
||||
/*
 * aac_probe_one - bind the driver to one aacraid PCI function
 * @pdev: PCI device being probed
 * @id: matched entry from aac_pci_tbl; driver_data indexes aac_drivers[]
 *
 * Allocates a Scsi_Host with an embedded aac_dev, picks the lowest free
 * adapter id, sets DMA masks, runs the card-specific init routine and
 * registers the host with the SCSI midlayer.  On failure each acquired
 * resource is released in reverse order via the goto-cleanup chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit aac_probe_one(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	unsigned index = id->driver_data;
	struct Scsi_Host *shost;
	struct aac_dev *aac;
	struct list_head *insert = &aac_devices;
	int error = -ENODEV;
	int unique_id = 0;

	/*
	 * aac_devices is kept sorted by id; find the first gap (or the
	 * end) so this adapter gets the lowest unused id, and remember
	 * the list position to insert after.
	 */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id > unique_id)
			break;
		insert = &aac->entry;
		unique_id++;
	}

	if (pci_enable_device(pdev))
		goto out;

	if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
			pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFULL))
		goto out;
	/*
	 * If the quirk31 bit is set, the adapter needs adapter
	 * to driver communication memory to be allocated below 2gig
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
		if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) ||
				pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL))
			goto out;

	pci_set_master(pdev);

	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
	if (!shost)
		goto out_disable_pdev;

	shost->irq = pdev->irq;
	shost->base = pci_resource_start(pdev, 0);
	shost->unique_id = unique_id;

	/* The aac_dev lives in the hostdata area of the Scsi_Host. */
	aac = (struct aac_dev *)shost->hostdata;
	aac->scsi_host_ptr = shost;
	aac->pdev = pdev;
	aac->name = aac_driver_template.name;
	aac->id = shost->unique_id;
	aac->cardtype = index;
	INIT_LIST_HEAD(&aac->entry);

	aac->fibs = kmalloc(sizeof(struct fib) * AAC_NUM_FIB, GFP_KERNEL);
	if (!aac->fibs)
		goto out_free_host;
	spin_lock_init(&aac->fib_lock);

	/* Card-family-specific bring-up (e.g. aac_rx_init/aac_rkt_init). */
	if ((*aac_drivers[index].init)(aac))
		goto out_free_fibs;

	/*
	 * If we had set a smaller DMA mask earlier, set it to 4gig
	 * now since the adapter can dma data to at least a 4gig
	 * address space.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
		if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL))
			goto out_free_fibs;

	aac_get_adapter_info(aac);

	/*
	 * max channel will be the physical channels plus 1 virtual channel
	 * all containers are on the virtual channel 0
	 * physical channels are address by their actual physical number+1
	 */
	if (aac->nondasd_support == 1)
		shost->max_channel = aac_drivers[index].channels+1;
	else
		shost->max_channel = 1;

	aac_get_config_status(aac);
	aac_get_containers(aac);
	list_add(&aac->entry, insert);

	shost->max_id = aac->maximum_num_containers;
	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
		shost->max_id = MAXIMUM_NUM_CONTAINERS;
	else
		shost->this_id = shost->max_id;

	/*
	 * dmb - we may need to move the setting of these parms somewhere else once
	 * we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;
	scsi_scan_host(shost);

	return 0;

 out_deinit:
	/* Stop the command thread before tearing down what it uses. */
	kill_proc(aac->thread_pid, SIGKILL, 0);
	wait_for_completion(&aac->aif_completion);

	aac_send_shutdown(aac);
	fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	kfree(aac->queues);
	free_irq(pdev->irq, aac);
	iounmap(aac->regs.sa);
 out_free_fibs:
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
|
||||
|
||||
/*
 * aac_remove_one - unbind the driver from one adapter
 * @pdev: PCI device being removed
 *
 * Mirrors aac_probe_one: detaches from the SCSI midlayer, stops the
 * command thread, tells the firmware to shut down, then releases DMA
 * memory, the IRQ, the register mapping and the host structure.  The
 * teardown order matters; keep it in sync with the probe error path.
 */
static void __devexit aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_remove_host(shost);

	/* Stop the per-adapter command thread before freeing its data. */
	kill_proc(aac->thread_pid, SIGKILL, 0);
	wait_for_completion(&aac->aif_completion);

	aac_send_shutdown(aac);
	fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
			aac->comm_phys);
	kfree(aac->queues);

	free_irq(pdev->irq, aac);
	iounmap(aac->regs.sa);

	kfree(aac->fibs);

	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
}
|
||||
|
||||
/* PCI driver glue: binds aac_probe_one/aac_remove_one to aac_pci_tbl. */
static struct pci_driver aac_pci_driver = {
	.name = AAC_DRIVERNAME,
	.id_table = aac_pci_tbl,
	.probe = aac_probe_one,
	.remove = __devexit_p(aac_remove_one),
};
|
||||
|
||||
/*
 * aac_init - module entry point
 *
 * Registers the PCI driver and then the "aac" configuration character
 * device.  A failure to register the char device is deliberately
 * non-fatal: the warning is logged and 0 is still returned, so the
 * SCSI side keeps working without the management interface
 * (presumably intentional — NOTE(review): confirm).
 */
static int __init aac_init(void)
{
	int error;

	printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n",
			AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);

	error = pci_module_init(&aac_pci_driver);
	if (error)
		return error;

	/* Dynamic major (first arg 0); result saved for unregistration. */
	aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
	if (aac_cfg_major < 0) {
		printk(KERN_WARNING
				"aacraid: unable to register \"aac\" device.\n");
	}
	return 0;
}
|
||||
|
||||
/*
 * aac_exit - module exit point
 *
 * Unregisters the configuration character device first, then the PCI
 * driver (which detaches every bound adapter via aac_remove_one).
 */
static void __exit aac_exit(void)
{
	unregister_chrdev(aac_cfg_major, "aac");
	pci_unregister_driver(&aac_pci_driver);
}
|
||||
|
||||
/* Module entry/exit registration. */
module_init(aac_init);
module_exit(aac_exit);
|
440
drivers/scsi/aacraid/rkt.c
Normal file
440
drivers/scsi/aacraid/rkt.c
Normal file
@@ -0,0 +1,440 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* rkt.c
|
||||
*
|
||||
* Abstract: Hardware miniport for Drawbridge specific hardware functions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
/*
 * aac_rkt_intr - interrupt handler for Rocket-class adapters
 * @irq: interrupt number (unused)
 * @dev_id: adapter (struct aac_dev *)
 * @regs: register snapshot (unused)
 *
 * Reads the outbound interrupt status, masks it against the currently
 * enabled sources, and services exactly one doorbell event per
 * invocation: firmware printf, adapter command/response queue ready,
 * or queue-not-full notifications.  Each event is acknowledged by
 * writing its bit back to MUnit.ODR.
 */
static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat, mask;
	intstat = rkt_readb(dev, MUnit.OISR);
	/*
	 * Read mask and invert because drawbridge is reversed.
	 * This allows us to only service interrupts that have
	 * been enabled.
	 */
	mask = ~(dev->OIMR);
	/* Check to see if this is our interrupt. If it isn't just return */
	if (intstat & mask)
	{
		bellbits = rkt_readl(dev, OutboundDoorbellReg);
		if (bellbits & DoorBellPrintfReady) {
			/* NOTE(review): unlike aac_rx_intr, the mailbox
			 * value is passed to aac_printf without
			 * le32_to_cpu — confirm whether that matters on
			 * big-endian hosts. */
			aac_printf(dev, rkt_readl(dev, IndexRegs.Mailbox[5]));
			rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
			rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
		}
		else if (bellbits & DoorBellAdapterNormCmdReady) {
			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (bellbits & DoorBellAdapterNormRespReady) {
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
			rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
		}
		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (bellbits & DoorBellAdapterNormRespNotFull) {
			/* Acks both not-full bells; same pattern as rx.c. */
			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
|
||||
|
||||
/**
 *	rkt_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@status: where to store the adapter's Mailbox 0 reply (may be NULL)
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion, polling the outbound doorbell for up to 30
 *	seconds with doorbell interrupts disabled.
 *
 *	Returns 0 on success or -ETIMEDOUT.
 */

static int rkt_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the command into Mailbox 0
	 */
	rkt_writel(dev, InboundMailbox0, command);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 */
	rkt_writel(dev, InboundMailbox1, p1);
	rkt_writel(dev, InboundMailbox2, 0);
	rkt_writel(dev, InboundMailbox3, 0);
	rkt_writel(dev, InboundMailbox4, 0);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rkt_readb (dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 *	Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}
	if (ok != 1) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = rkt_readl(dev, IndexRegs.Mailbox[0]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
	return 0;

}
|
||||
|
||||
/**
|
||||
* aac_rkt_interrupt_adapter - interrupt adapter
|
||||
* @dev: Adapter
|
||||
*
|
||||
* Send an interrupt to the i960 and breakpoint it.
|
||||
*/
|
||||
|
||||
static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
|
||||
{
|
||||
u32 ret;
|
||||
rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_rkt_notify_adapter - send an event to the adapter
|
||||
* @dev: Adapter
|
||||
* @event: Event to send
|
||||
*
|
||||
* Notify the i960 that something it probably cares about has
|
||||
* happened.
|
||||
*/
|
||||
|
||||
static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
|
||||
{
|
||||
switch (event) {
|
||||
|
||||
case AdapNormCmdQue:
|
||||
rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
|
||||
break;
|
||||
case HostNormRespNotFull:
|
||||
rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
|
||||
break;
|
||||
case AdapNormRespQue:
|
||||
rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
|
||||
break;
|
||||
case HostNormCmdNotFull:
|
||||
rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
|
||||
break;
|
||||
case HostShutdown:
|
||||
// rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
|
||||
break;
|
||||
case FastIo:
|
||||
rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
|
||||
break;
|
||||
case AdapPrintfDone:
|
||||
rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 *	aac_rkt_start_adapter	-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter: stamp the init
 *	structure with the current time, clear and re-enable outbound
 *	interrupts, and hand the firmware the physical address of the
 *	init structure via a synchronous command.
 */

static void aac_rkt_start_adapter(struct aac_dev *dev)
{
	u32 status;
	struct aac_init *init;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
	/*
	 *	Tell the adapter we are back and up and running so it will scan
	 *	its command queues and enable our interrupts
	 */
	dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	rkt_writeb(dev, MUnit.OIMR, 0xff);
	rkt_writel(dev, MUnit.ODR, 0xffffffff);
//	rkt_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);

	// We can only use a 32 bit address here
	rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
}
|
||||
|
||||
/**
 *	aac_rkt_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.  Negative
 *	values indicate self-test failure (-1), a DMA allocation failure
 *	while fetching panic details (-2), or firmware not yet running
 *	(-3); a positive value is the firmware's own panic/POST code.
 */
static int aac_rkt_check_health(struct aac_dev *dev)
{
	u32 status = rkt_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (status & SELF_TEST_FAILED)
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (status & KERNEL_PANIC) {
		char * buffer;
		struct POSTSTATUS {
			u32 Post_Command;
			u32 Post_Address;
		} * post;
		dma_addr_t paddr, baddr;
		int ret;

		/* 0xBCxxyy00-style status already embeds the panic code. */
		if ((status & 0xFF000000L) == 0xBC000000L)
			return (status >> 16) & 0xFF;
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (buffer == NULL)
			return ret;
		post = pci_alloc_consistent(dev->pdev,
			sizeof(struct POSTSTATUS), &paddr);
		if (post == NULL) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		/* NOTE(review): baddr/paddr are dma_addr_t truncated to
		 * 32 bits here — presumably safe given the DMA masks set
		 * at probe time; confirm. */
		post->Post_Address = cpu_to_le32(baddr);
		rkt_writel(dev, MUnit.IMRx[0], paddr);
		rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
			post, paddr);
		/* Firmware writes an ASCII "0x.." code into the buffer;
		 * parse the two hex digits by hand. */
		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
			ret <<= 4;
			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (!(status & KERNEL_UP_AND_RUNNING))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}
|
||||
|
||||
/**
 *	aac_rkt_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.  Maps the register window, waits up to three
 *	minutes for the firmware to come up, installs the interrupt
 *	handler and dispatch table, starts the command thread and finally
 *	kicks the adapter.
 *
 *	Returns 0 on success, -1 on any failure.
 */

int aac_rkt_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int instance;
	const char * name;

	instance = dev->id;
	name = dev->name;

	/*
	 *	Map in the registers from the adapter.
	 */
	if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
	{
		/* NOTE(review): this path reaches iounmap() with a NULL
		 * mapping — harmless on most arches but worth confirming. */
		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
		goto error_iounmap;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
		printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
	{
		if(time_after(jiffies, start+180*HZ))
		{
			status = rkt_readl(dev, MUnit.OMRx[0]);
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}
	if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
	{
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
		goto error_iounmap;
	}
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
	dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rkt_check_health;

	if (aac_init_adapter(dev) == NULL)
		goto error_irq;
	/*
	 *	Start any kernel threads needed
	 */
	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
	if(dev->thread_pid < 0)
	{
		printk(KERN_ERR "aacraid: Unable to create rkt thread.\n");
		goto error_kfree;
	}
	/*
	 *	Tell the adapter that all is configured, and it can start
	 *	accepting requests
	 */
	aac_rkt_start_adapter(dev);
	return 0;

error_kfree:
	kfree(dev->queues);

error_irq:
	free_irq(dev->scsi_host_ptr->irq, (void *)dev);

error_iounmap:
	iounmap(dev->regs.rkt);

	return -1;
}
|
441
drivers/scsi/aacraid/rx.c
Normal file
441
drivers/scsi/aacraid/rx.c
Normal file
@@ -0,0 +1,441 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* rx.c
|
||||
*
|
||||
* Abstract: Hardware miniport for Drawbridge specific hardware functions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
/*
 * aac_rx_intr - interrupt handler for Rx-class adapters
 * @irq: interrupt number (unused)
 * @dev_id: adapter (struct aac_dev *)
 * @regs: register snapshot (unused)
 *
 * Reads the outbound interrupt status, masks it against the currently
 * enabled sources, and services exactly one doorbell event per
 * invocation: firmware printf, adapter command/response queue ready,
 * or queue-not-full notifications.  Each event is acknowledged by
 * writing its bit back to MUnit.ODR.
 */
static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat, mask;
	intstat = rx_readb(dev, MUnit.OISR);
	/*
	 *	Read mask and invert because drawbridge is reversed.
	 *	This allows us to only service interrupts that have
	 *	been enabled.
	 */
	mask = ~(dev->OIMR);
	/* Check to see if this is our interrupt.  If it isn't just return */
	if (intstat & mask)
	{
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (bellbits & DoorBellPrintfReady) {
			aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
		}
		else if (bellbits & DoorBellAdapterNormCmdReady) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (bellbits & DoorBellAdapterNormRespReady) {
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
		}
		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (bellbits & DoorBellAdapterNormRespNotFull) {
			/* Acks both not-full bells; same pattern as rkt.c. */
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
|
||||
|
||||
/**
 *	rx_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@status: where to store the adapter's Mailbox 0 reply (may be NULL)
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion, polling the outbound doorbell for up to 30
 *	seconds with the doorbell interrupt bit masked.
 *
 *	Returns 0 on success or -ETIMEDOUT.
 */

static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the command into Mailbox 0
	 */
	rx_writel(dev, InboundMailbox0, command);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 */
	rx_writel(dev, InboundMailbox1, p1);
	rx_writel(dev, InboundMailbox2, 0);
	rx_writel(dev, InboundMailbox3, 0);
	rx_writel(dev, InboundMailbox4, 0);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR |= 0x04);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rx_readb (dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 *	Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}
	if (ok != 1) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = rx_readl(dev, IndexRegs.Mailbox[0]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
	return 0;

}
|
||||
|
||||
/**
|
||||
* aac_rx_interrupt_adapter - interrupt adapter
|
||||
* @dev: Adapter
|
||||
*
|
||||
* Send an interrupt to the i960 and breakpoint it.
|
||||
*/
|
||||
|
||||
static void aac_rx_interrupt_adapter(struct aac_dev *dev)
|
||||
{
|
||||
u32 ret;
|
||||
rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
|
||||
}
|
||||
|
||||
/**
 *	aac_rx_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened, by ringing the inbound doorbell associated with the
 *	event.  HostShutdown is deliberately a no-op; any other unknown
 *	event is a driver bug.
 */

static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		/* Intentionally ignored; the sync command stays disabled. */
//		rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}
|
||||
|
||||
/**
 *	aac_rx_start_adapter	-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter: stamp the init
 *	structure with the current time, set up the interrupt mask and
 *	hand the firmware the physical address of the init structure.
 */

static void aac_rx_start_adapter(struct aac_dev *dev)
{
	u32 status;
	struct aac_init *init;

	init = dev->init;
	/* Let the firmware know what time the host thinks it is. */
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
	/*
	 *	Tell the adapter we are back and up and running so it will scan
	 *	its command queues and enable our interrupts
	 */
	dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	rx_writeb(dev, MUnit.OIMR, 0xff);		/* mask everything */
	rx_writel(dev, MUnit.ODR, 0xffffffff);		/* ack all pending doorbells */
/*	rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK); */
	/* Unmask only the doorbell interrupt (bit 2 clear), caching the
	 * mask in dev->OIMR for later mask/unmask operations. */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);

	/* We can only use a 32 bit address here */
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
}
|
||||
|
||||
/**
 *	aac_rx_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.  Negative
 *	values are driver-side failure codes (-1 self-test failure,
 *	-2 allocation failure during diagnosis, -3 firmware not running);
 *	positive values are BlinkLED / POST codes reported by the firmware.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (status & SELF_TEST_FAILED)
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (status & KERNEL_PANIC) {
		char * buffer;
		struct POSTSTATUS {
			u32 Post_Command;
			u32 Post_Address;
		} * post;
		dma_addr_t paddr, baddr;
		int ret;

		/* Status of the form 0xBCxxNN00 carries the BlinkLED code
		 * in bits 16-23; report it directly without probing. */
		if ((status & 0xFF000000L) == 0xBC000000L)
			return (status >> 16) & 0xFF;
		/* DMA-visible scratch buffer for the POST result text. */
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (buffer == NULL)
			return ret;
		post = pci_alloc_consistent(dev->pdev,
		  sizeof(struct POSTSTATUS), &paddr);
		if (post == NULL) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		/* NOTE(review): paddr (the POSTSTATUS struct) goes in via
		 * IMRx[0] while baddr (the text buffer) is the sync-cmd
		 * parameter — verify this matches the firmware protocol. */
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
		  post, paddr);
		/* Firmware writes the POST code as "0xNN" into the buffer;
		 * parse the two hex digits (assumes upper-case A-F — TODO
		 * confirm the firmware never emits lower case). */
		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
			ret <<= 4;
			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (!(status & KERNEL_UP_AND_RUNNING))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}
|
||||
|
||||
/**
 *	aac_rx_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.  Returns 0 on success, -1 on any failure.
 */

int aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int instance;
	const char * name;

	instance = dev->id;
	name = dev->name;

	/*
	 *	Map in the registers from the adapter.
	 */
	if((dev->regs.rx = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
	{
		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
		/* Nothing mapped yet, so no cleanup label needed. */
		return -1;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
		printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes.
	 *	Both the index-register mailbox and the outbound message
	 *	register must report KERNEL_UP_AND_RUNNING.
	 */
	while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING))
		|| (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
	{
		if(time_after(jiffies, start+180*HZ))
		{
			status = rx_readl(dev, IndexRegs.Mailbox[7]);
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/* Yield the CPU for a tick between polls. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}
	if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
	{
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
		goto error_iounmap;
	}
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;

	/* Allocate and initialise the shared comm region (queues etc). */
	if (aac_init_adapter(dev) == NULL)
		goto error_irq;
	/*
	 *	Start any kernel threads needed
	 */
	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
	if(dev->thread_pid < 0)
	{
		printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
		goto error_kfree;
	}
	/*
	 *	Tell the adapter that all is configured, and it can start
	 *	accepting requests
	 */
	aac_rx_start_adapter(dev);
	return 0;

error_kfree:
	kfree(dev->queues);

error_irq:
	free_irq(dev->scsi_host_ptr->irq, (void *)dev);

error_iounmap:
	iounmap(dev->regs.rx);

	return -1;
}
|
374
drivers/scsi/aacraid/sa.c
Normal file
374
drivers/scsi/aacraid/sa.c
Normal file
@@ -0,0 +1,374 @@
|
||||
/*
|
||||
* Adaptec AAC series RAID controller driver
|
||||
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
|
||||
*
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Module Name:
|
||||
* sa.c
|
||||
*
|
||||
* Abstract: Drawbridge specific support functions
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
#include "aacraid.h"
|
||||
|
||||
static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
|
||||
{
|
||||
struct aac_dev *dev = dev_id;
|
||||
unsigned short intstat, mask;
|
||||
|
||||
intstat = sa_readw(dev, DoorbellReg_p);
|
||||
/*
|
||||
* Read mask and invert because drawbridge is reversed.
|
||||
* This allows us to only service interrupts that have been enabled.
|
||||
*/
|
||||
mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
|
||||
|
||||
/* Check to see if this is our interrupt. If it isn't just return */
|
||||
|
||||
if (intstat & mask) {
|
||||
if (intstat & PrintfReady) {
|
||||
aac_printf(dev, sa_readl(dev, Mailbox5));
|
||||
sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
|
||||
sa_writew(dev, DoorbellReg_s, PrintfDone);
|
||||
} else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
|
||||
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
|
||||
sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
|
||||
} else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
|
||||
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
|
||||
sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
|
||||
} else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
|
||||
sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
|
||||
} else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
|
||||
sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_sa_notify_adapter - handle adapter notification
|
||||
* @dev: Adapter that notification is for
|
||||
* @event: Event to notidy
|
||||
*
|
||||
* Notify the adapter of an event
|
||||
*/
|
||||
|
||||
void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
|
||||
{
|
||||
switch (event) {
|
||||
|
||||
case AdapNormCmdQue:
|
||||
sa_writew(dev, DoorbellReg_s,DOORBELL_1);
|
||||
break;
|
||||
case HostNormRespNotFull:
|
||||
sa_writew(dev, DoorbellReg_s,DOORBELL_4);
|
||||
break;
|
||||
case AdapNormRespQue:
|
||||
sa_writew(dev, DoorbellReg_s,DOORBELL_2);
|
||||
break;
|
||||
case HostNormCmdNotFull:
|
||||
sa_writew(dev, DoorbellReg_s,DOORBELL_3);
|
||||
break;
|
||||
case HostShutdown:
|
||||
//sa_sync_cmd(dev, HOST_CRASHING, 0, &ret);
|
||||
break;
|
||||
case FastIo:
|
||||
sa_writew(dev, DoorbellReg_s,DOORBELL_6);
|
||||
break;
|
||||
case AdapPrintfDone:
|
||||
sa_writew(dev, DoorbellReg_s,DOORBELL_5);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
 *	sa_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@ret: adapter status (may be NULL if the caller does not care)
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.  Returns 0 on success or -ETIMEDOUT when the
 *	adapter has not answered within 30 seconds.
 */

static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *ret)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the Command into Mailbox 0
	 */
	sa_writel(dev, Mailbox0, command);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 */
	sa_writel(dev, Mailbox1, p1);
	sa_writel(dev, Mailbox2, 0);
	sa_writel(dev, Mailbox3, 0);
	sa_writel(dev, Mailbox4, 0);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
	/*
	 *	Signal that there is a new synch command
	 */
	sa_writew(dev, DoorbellReg_s, DOORBELL_0);

	ok = 0;
	start = jiffies;

	/* Poll for completion for up to 30 seconds. */
	while(time_before(jiffies, start+30*HZ))
	{
		/*
		 *	Delay 5uS so that the monitor gets access
		 */
		udelay(5);
		/*
		 *	Mon110 will set doorbell0 bit when it has
		 *	completed the command.
		 */
		if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
			ok = 1;
			break;
		}
		/* Sleep for a tick so we do not hog the CPU while waiting. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	if (ok != 1)
		return -ETIMEDOUT;
	/*
	 *	Clear the synch command doorbell.
	 */
	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (ret)
		*ret = sa_readl(dev, Mailbox0);
	return 0;
}
|
||||
|
||||
/**
|
||||
* aac_sa_interrupt_adapter - interrupt an adapter
|
||||
* @dev: Which adapter to enable.
|
||||
*
|
||||
* Breakpoint an adapter.
|
||||
*/
|
||||
|
||||
static void aac_sa_interrupt_adapter (struct aac_dev *dev)
|
||||
{
|
||||
u32 ret;
|
||||
sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
|
||||
}
|
||||
|
||||
/**
 *	aac_sa_start_adapter	-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an ARM based AAC adapter: stamp the init
 *	structure, program the interrupt masks and hand the firmware the
 *	physical address of the init structure.
 */

static void aac_sa_start_adapter(struct aac_dev *dev)
{
	u32 ret;
	struct aac_init *init;
	/*
	 * Fill in the remaining pieces of the init.
	 */
	init = dev->init;
	/* Let the firmware know what time the host thinks it is. */
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());

	/*
	 * Tell the adapter we are back and up and running so it will scan its command
	 * queues and enable our interrupts
	 */
	dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
	/*
	 *	First clear out all interrupts.  Then enable the one's that
	 *	we can handle.  (The drawbridge mask registers are reversed:
	 *	SET masks interrupts, CLEAR unmasks them.)
	 */
	sa_writew(dev, SaDbCSR.PRISETIRQMASK, cpu_to_le16(0xffff));
	sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
	/* We can only use a 32 bit address here */
	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &ret);
}
|
||||
|
||||
/**
|
||||
* aac_sa_check_health
|
||||
* @dev: device to check if healthy
|
||||
*
|
||||
* Will attempt to determine if the specified adapter is alive and
|
||||
* capable of handling requests, returning 0 if alive.
|
||||
*/
|
||||
static int aac_sa_check_health(struct aac_dev *dev)
|
||||
{
|
||||
long status = sa_readl(dev, Mailbox7);
|
||||
|
||||
/*
|
||||
* Check to see if the board failed any self tests.
|
||||
*/
|
||||
if (status & SELF_TEST_FAILED)
|
||||
return -1;
|
||||
/*
|
||||
* Check to see if the board panic'd while booting.
|
||||
*/
|
||||
if (status & KERNEL_PANIC)
|
||||
return -2;
|
||||
/*
|
||||
* Wait for the adapter to be up and running. Wait up to 3 minutes
|
||||
*/
|
||||
if (!(status & KERNEL_UP_AND_RUNNING))
|
||||
return -3;
|
||||
/*
|
||||
* Everything is OK
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_sa_init - initialize an ARM based AAC card
|
||||
* @dev: device to configure
|
||||
*
|
||||
* Allocate and set up resources for the ARM based AAC variants. The
|
||||
* device_interface in the commregion will be allocated and linked
|
||||
* to the comm region.
|
||||
*/
|
||||
|
||||
int aac_sa_init(struct aac_dev *dev)
|
||||
{
|
||||
unsigned long start;
|
||||
unsigned long status;
|
||||
int instance;
|
||||
const char *name;
|
||||
|
||||
instance = dev->id;
|
||||
name = dev->name;
|
||||
|
||||
/*
|
||||
* Map in the registers from the adapter.
|
||||
*/
|
||||
|
||||
if((dev->regs.sa = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
|
||||
{
|
||||
printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
|
||||
goto error_iounmap;
|
||||
}
|
||||
/*
|
||||
* Check to see if the board failed any self tests.
|
||||
*/
|
||||
if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
|
||||
printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
|
||||
goto error_iounmap;
|
||||
}
|
||||
/*
|
||||
* Check to see if the board panic'd while booting.
|
||||
*/
|
||||
if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
|
||||
printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
|
||||
goto error_iounmap;
|
||||
}
|
||||
start = jiffies;
|
||||
/*
|
||||
* Wait for the adapter to be up and running. Wait up to 3 minutes.
|
||||
*/
|
||||
while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
|
||||
if (time_after(jiffies, start+180*HZ)) {
|
||||
status = sa_readl(dev, Mailbox7);
|
||||
printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
|
||||
name, instance, status);
|
||||
goto error_iounmap;
|
||||
}
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
schedule_timeout(1);
|
||||
}
|
||||
|
||||
if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
|
||||
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
|
||||
goto error_iounmap;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the function dispatch table.
|
||||
*/
|
||||
|
||||
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
|
||||
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
|
||||
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
|
||||
dev->a_ops.adapter_check_health = aac_sa_check_health;
|
||||
|
||||
|
||||
if(aac_init_adapter(dev) == NULL)
|
||||
goto error_irq;
|
||||
|
||||
/*
|
||||
* Start any kernel threads needed
|
||||
*/
|
||||
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
|
||||
if (dev->thread_pid < 0) {
|
||||
printk(KERN_ERR "aacraid: Unable to create command thread.\n");
|
||||
goto error_kfree;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell the adapter that all is configure, and it can start
|
||||
* accepting requests
|
||||
*/
|
||||
aac_sa_start_adapter(dev);
|
||||
return 0;
|
||||
|
||||
|
||||
error_kfree:
|
||||
kfree(dev->queues);
|
||||
|
||||
error_irq:
|
||||
free_irq(dev->scsi_host_ptr->irq, (void *)dev);
|
||||
|
||||
error_iounmap:
|
||||
iounmap(dev->regs.sa);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
Fai riferimento in un nuovo problema
Block a user